author     Thomas Zimmermann <tzimmermann@suse.de>   2021-12-17 11:33:33 +0100
committer  Thomas Zimmermann <tzimmermann@suse.de>   2021-12-17 11:33:33 +0100
commit     1758047057dbe329be712a31b79db7151b5871f8 (patch)
tree       00203eb55328f2feda70b3d37c964287b364796f /drivers/gpu
parent     bcae3af286f49bf4f6cda03f165fbe530f4a6bed (diff)
parent     1c405ca11bf563de1725e5ecfb4a74ee289d2ee9 (diff)
download   linux-1758047057dbe329be712a31b79db7151b5871f8.tar.bz2
Merge drm/drm-next into drm-misc-next-fixes
Backmerging to bring drm-misc-next-fixes up to the latest state for
the current release cycle.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Diffstat (limited to 'drivers/gpu')
1314 files changed, 108420 insertions, 34635 deletions
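One detail worth calling out before the raw diff: the first Kconfig hunk adds CONFIG_DRM_NOMODESET as a bool rather than a tristate, with the in-tree comment "nomodeset parameter is global and expected built-in". The reason is that __setup() handlers for kernel boot parameters only run for built-in code, so the parameter parsing cannot live in a module. As a rough illustration only — a minimal sketch of how such a built-in parameter is typically wired up, not necessarily the exact contents of the drm_nomodeset.o added below:

/* Illustrative sketch of a built-in "nomodeset" boot parameter.
 * Function names here are assumptions for illustration.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/types.h>

static bool drm_nomodeset;

/* Modesetting drivers would query this early in probe and bail out
 * when it returns true, leaving only the firmware framebuffer. */
bool drm_firmware_drivers_only(void)
{
	return drm_nomodeset;
}

static int __init disable_modeset(char *str)
{
	drm_nomodeset = true;

	pr_warn("Booted with the nomodeset parameter. Only the system framebuffer will be available\n");

	return 1;
}

/* __setup() only takes effect for built-in code, which is why the
 * Kconfig symbol below is a bool and "expected built-in". */
__setup("nomodeset", disable_modeset);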
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0039df26854b..b1f22e457fd0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -8,6 +8,7 @@ menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
+	select DRM_NOMODESET
 	select DRM_PANEL_ORIENTATION_QUIRKS
 	select HDMI
 	select FB_CMDLINE
@@ -211,20 +212,13 @@ config DRM_TTM_HELPER
 	  Helpers for ttm-based gem objects
 
 config DRM_GEM_CMA_HELPER
-	bool
+	tristate
 	depends on DRM
 	help
 	  Choose this if you need the GEM CMA helper functions
 
-config DRM_KMS_CMA_HELPER
-	bool
-	depends on DRM
-	select DRM_GEM_CMA_HELPER
-	help
-	  Choose this if you need the KMS CMA helper functions
-
 config DRM_GEM_SHMEM_HELPER
-	bool
+	tristate
 	depends on DRM && MMU
 	help
 	  Choose this if you need the GEM shmem helper functions
@@ -394,6 +388,8 @@ source "drivers/gpu/drm/xlnx/Kconfig"
 
 source "drivers/gpu/drm/gud/Kconfig"
 
+source "drivers/gpu/drm/sprd/Kconfig"
+
 config DRM_HYPERV
 	tristate "DRM Support for Hyper-V synthetic video device"
 	depends on DRM && PCI && MMU && HYPERV
@@ -492,6 +488,15 @@ config DRM_EXPORT_FOR_TESTS
 
 config DRM_PANEL_ORIENTATION_QUIRKS
 	tristate
 
+# Separate option because nomodeset parameter is global and expected built-in
+config DRM_NOMODESET
+	bool
+	default n
+
 config DRM_LIB_RANDOM
 	bool
 	default n
+
+config DRM_PRIVACY_SCREEN
+	bool
+	default n
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0dff40bb863c..301a44dc18e3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,37 +4,44 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 drm-y := drm_aperture.o drm_auth.o drm_cache.o \
-	drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
+	drm_file.o drm_gem.o drm_ioctl.o \
 	drm_drv.o \
-	drm_sysfs.o drm_hashtab.o drm_mm.o \
+	drm_sysfs.o drm_mm.o \
 	drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \
-	drm_encoder_slave.o \
 	drm_trace_points.o drm_prime.o \
-	drm_rect.o drm_vma_manager.o drm_flip_work.o \
+	drm_vma_manager.o \
 	drm_modeset_lock.o drm_atomic.o drm_bridge.o \
 	drm_framebuffer.o drm_connector.o drm_blend.o \
 	drm_encoder.o drm_mode_object.o drm_property.o \
 	drm_plane.o drm_color_mgmt.o drm_print.o \
 	drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
 	drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
-	drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
+	drm_client_modeset.o drm_atomic_uapi.o \
 	drm_managed.o drm_vblank_work.o
 
 drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \
-	drm_legacy_misc.o drm_lock.o drm_memory.o drm_scatter.o \
-	drm_vm.o
+	drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \
+	drm_memory.o drm_scatter.o drm_vm.o
 drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
-drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
-drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
 drm-$(CONFIG_PCI) += drm_pci.o
 drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm-$(CONFIG_DRM_PRIVACY_SCREEN) += drm_privacy_screen.o drm_privacy_screen_x86.o
 
 obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
 
+obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o
+
+drm_cma_helper-y := drm_gem_cma_helper.o
+drm_cma_helper-$(CONFIG_DRM_KMS_HELPER) += drm_fb_cma_helper.o
+obj-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_cma_helper.o
+
+drm_shmem_helper-y := drm_gem_shmem_helper.o
+obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
+
 drm_vram_helper-y := drm_gem_vram_helper.o
 obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
@@ -42,18 +49,18 @@ drm_ttm_helper-y := drm_gem_ttm_helper.o
 obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
 
 drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
-		drm_dsc.o drm_probe_helper.o \
+		drm_dsc.o drm_encoder_slave.o drm_flip_work.o drm_hdcp.o \
+		drm_probe_helper.o \
 		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
 		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
 		drm_simple_kms_helper.o drm_modeset_helper.o \
 		drm_scdc_helper.o drm_gem_atomic_helper.o \
 		drm_gem_framebuffer_helper.o \
 		drm_atomic_state_helper.o drm_damage_helper.o \
-		drm_format_helper.o drm_self_refresh_helper.o
+		drm_format_helper.o drm_self_refresh_helper.o drm_rect.o
 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
-drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
 drm_kms_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o
@@ -127,3 +134,4 @@ obj-$(CONFIG_DRM_TIDSS) += tidss/
 obj-y += xlnx/
 obj-y += gud/
 obj-$(CONFIG_DRM_HYPERV) += hyperv/
+obj-$(CONFIG_DRM_SPRD) += sprd/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 8d0748184a14..7fedbb725e17 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -45,7 +45,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
 	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
 	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
-	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
+	amdgpu_gem.o amdgpu_ring.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
 	atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
@@ -73,10 +73,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
 
 amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
-	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
-	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-	nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
-	beige_goby_reg_init.o yellow_carp_reg_init.o cyan_skillfish_reg_init.o
+	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
+	nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
 
 # add DF block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 148f6c3343ab..bcfdb63b1d42 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -307,6 +307,8 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.late_initialized = true;
 	}
 
+	amdgpu_ras_set_error_query_ready(adev, true);
+
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dc3c6b3a00e5..9f017663ac50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -205,6 +205,7 @@ extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
 extern uint amdgpu_ras_mask;
 extern int amdgpu_bad_page_threshold;
+extern bool amdgpu_ignore_bad_page_threshold;
 extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
 extern int amdgpu_async_gfx_ring;
 extern int amdgpu_mcbp;
@@ -457,7 +458,6 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_abo;
-	struct dma_fence		*excl;
 	unsigned			shared_count;
 	struct dma_fence		**shared;
 	struct dma_fence_cb		cb;
@@ -744,6 +744,7 @@ enum amd_hw_ip_block_type {
 	UVD_HWIP,
 	VCN_HWIP = UVD_HWIP,
 	JPEG_HWIP = VCN_HWIP,
+	VCN1_HWIP,
 	VCE_HWIP,
 	DF_HWIP,
 	DCE_HWIP,
@@ -755,10 +756,15 @@ enum amd_hw_ip_block_type {
 	CLK_HWIP,
 	UMC_HWIP,
 	RSMU_HWIP,
+	XGMI_HWIP,
+	DCI_HWIP,
 	MAX_HWIP
 };
 
-#define HWIP_MAX_INSTANCE	8
+#define HWIP_MAX_INSTANCE	10
+
+#define HW_ID_MAX		300
+#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
 
 struct amd_powerplay {
 	void *pp_handle;
@@ -830,6 +836,7 @@ struct amdgpu_device {
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct debugfs_blob_wrapper	debugfs_vbios_blob;
+	struct debugfs_blob_wrapper	debugfs_discovery_blob;
 	struct mutex			srbm_mutex;
 	/* GRBM index mutex. Protects concurrent access to GRBM index */
 	struct mutex			grbm_idx_mutex;
@@ -1078,8 +1085,6 @@ struct amdgpu_device {
 	char				product_name[32];
 	char				serial[20];
 
-	struct amdgpu_autodump		autodump;
-
 	atomic_t			throttling_logging_enabled;
 	struct ratelimit_state		throttling_logging_rs;
 	uint32_t			ras_hw_enabled;
@@ -1087,8 +1092,10 @@ struct amdgpu_device {
 
 	bool				no_hw_access;
 	struct pci_saved_state		*pci_state;
+	pci_channel_state_t		pci_channel_state;
 
 	struct amdgpu_reset_control	*reset_cntl;
+	uint32_t			ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 3003ee1c9487..46cf48b3904a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -31,6 +31,8 @@
 #include <linux/dma-buf.h>
 #include "amdgpu_xgmi.h"
 #include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
 
 /* Total memory size in system memory and all GPU VRAM. Used to
  * estimate worst case amount of memory to reserve for page tables
@@ -70,8 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 	if (!kfd_initialized)
 		return;
 
-	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
-				      adev->pdev, adev->asic_type, vf);
+	adev->kfd.dev = kgd2kfd_probe(adev, vf);
 
 	if (adev->kfd.dev)
 		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -192,6 +193,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
 		kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd.dev)
+		r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+	return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
 	int r = 0;
@@ -222,19 +233,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
 	return r;
 }
 
-void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	if (amdgpu_device_should_recover_gpu(adev))
 		amdgpu_device_gpu_recover(adev, NULL);
 }
 
-int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
 				void **cpu_ptr, bool cp_mqd_gfx9)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
 	struct amdgpu_bo_param bp;
 	int r;
@@ -303,7 +311,7 @@ allocate_mem_reserve_bo_failed:
 	return r;
 }
 
-void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
 {
 	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
 
@@ -314,10 +322,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 	amdgpu_bo_unref(&(bo));
 }
 
-int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
 				void **mem_obj)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
 	struct amdgpu_bo_user *ubo;
 	struct amdgpu_bo_param bp;
@@ -344,18 +351,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
 	return 0;
 }
 
-void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
 {
 	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
 
 	amdgpu_bo_unref(&bo);
 }
 
-uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
 				      enum kgd_engine_type type)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	switch (type) {
 	case KGD_ENGINE_PFP:
 		return adev->gfx.pfp_fw_version;
@@ -388,11 +393,9 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
 	return 0;
 }
 
-void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
 				      struct kfd_local_mem_info *mem_info)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	memset(mem_info, 0, sizeof(*mem_info));
 
 	mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
@@ -417,19 +420,15 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
 	mem_info->mem_clk_max = 100;
 }
 
-uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	if (adev->gfx.funcs->get_gpu_clock_counter)
 		return adev->gfx.funcs->get_gpu_clock_counter(adev);
 	return 0;
 }
 
-uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	/* the sclk is in quantas of 10kHz */
 	if (amdgpu_sriov_vf(adev))
 		return adev->clock.default_sclk / 100;
@@ -439,9 +438,8 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	return 100;
 }
 
-void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
 
 	memset(cu_info, 0, sizeof(*cu_info));
@@ -462,13 +460,12 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
 	cu_info->lds_size = acu_info.lds_size;
 }
 
-int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
-				  struct kgd_dev **dma_buf_kgd,
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+				  struct amdgpu_device **dmabuf_adev,
 				  uint64_t *bo_size, void *metadata_buffer,
 				  size_t buffer_size, uint32_t *metadata_size,
 				  uint32_t *flags)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct dma_buf *dma_buf;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *bo;
@@ -496,8 +493,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
 		goto out_put;
 
 	r = 0;
-	if (dma_buf_kgd)
-		*dma_buf_kgd = (struct kgd_dev *)adev;
+	if (dmabuf_adev)
+		*dmabuf_adev = adev;
 	if (bo_size)
 		*bo_size = amdgpu_bo_size(bo);
 	if (metadata_buffer)
@@ -517,32 +514,18 @@ out_put:
 	return r;
 }
 
-uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
 	return amdgpu_vram_mgr_usage(vram_man);
 }
 
-uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	return adev->gmc.xgmi.hive_id;
-}
-
-uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
+					  struct amdgpu_device *src)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	return adev->unique_id;
-}
-
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
-{
-	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
-	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
+	struct amdgpu_device *peer_adev = src;
+	struct amdgpu_device *adev = dst;
 	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
 
 	if (ret < 0) {
@@ -554,16 +537,18 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
 	return (uint8_t)ret;
 }
 
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min)
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
+					    struct amdgpu_device *src,
+					    bool is_min)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev;
+	struct amdgpu_device *adev = dst, *peer_adev;
 	int num_links;
 
 	if (adev->asic_type != CHIP_ALDEBARAN)
 		return 0;
 
 	if (src)
-		peer_adev = (struct amdgpu_device *)src;
+		peer_adev = src;
 
 	/* num links returns 0 for indirect peers since indirect route is unknown. */
 	num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
@@ -578,9 +563,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev
 	return (num_links * 16 * 25000)/BITS_PER_BYTE;
 }
 
-int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)dev;
 	int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
 					fls(adev->pm.pcie_mlw_mask)) - 1;
 	int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
@@ -636,39 +620,11 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
 	return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
 }
 
-uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	return adev->rmmio_remap.bus_addr;
-}
-
-uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	return adev->gds.gws_size;
-}
-
-uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	return adev->rev_id;
-}
-
-int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
-	return adev->gmc.noretry;
-}
-
-int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+			    enum kgd_engine_type engine,
 			    uint32_t vmid, uint64_t gpu_addr,
 			    uint32_t *ib_cmd, uint32_t ib_len)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct amdgpu_ring *ring;
@@ -719,10 +675,8 @@ err:
 	return ret;
 }
 
-void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	amdgpu_dpm_switch_power_profile(adev,
 					PP_SMC_POWER_PROFILE_COMPUTE,
 					!idle);
@@ -736,10 +690,9 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
 	return false;
 }
 
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
+				     uint16_t vmid)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	if (adev->family == AMDGPU_FAMILY_AI) {
 		int i;
 
@@ -752,10 +705,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
 	return 0;
 }
 
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
-				      enum TLB_FLUSH_TYPE flush_type)
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+				      uint16_t pasid, enum TLB_FLUSH_TYPE flush_type)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	bool all_hub = false;
 
 	if (adev->family == AMDGPU_FAMILY_AI)
@@ -764,9 +716,18 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
 	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
 }
 
-bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	return adev->have_atomics_support;
 }
+
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev)
+{
+	struct ras_err_data err_data = {0, 0, 0, NULL};
+
+	/* CPU MCA will handle page retirement if connected_to_cpu is 1 */
+	if (!adev->gmc.xgmi.connected_to_cpu)
+		amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL);
+	else
+		amdgpu_amdkfd_gpu_reset(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ec028cf963f5..fcbc8a9c9e06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -137,20 +137,23 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
-int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+			    enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len);
-void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
-bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
-int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
-int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
-				      enum TLB_FLUSH_TYPE flush_type);
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
+int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
+				     uint16_t vmid);
+int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
+				      uint16_t pasid, enum TLB_FLUSH_TYPE flush_type);
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
 
@@ -158,7 +161,7 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
 
 int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
 
-void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);
 
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
 					int queue_bit);
@@ -197,37 +200,36 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
 }
 #endif
 /* Shared API */
-int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
 				void **cpu_ptr, bool mqd_gfx9);
-void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
-void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+				void **mem_obj);
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws,
 				struct kgd_mem **mem);
 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
-uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
 				      enum kgd_engine_type type);
-void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
 				      struct kfd_local_mem_info *mem_info);
-uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);
 
-uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
-int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
-				  struct kgd_dev **dmabuf_kgd,
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
+void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev,
+			       struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+				  struct amdgpu_device **dmabuf_adev,
 				  uint64_t *bo_size, void *metadata_buffer,
 				  size_t buffer_size, uint32_t *metadata_size,
 				  uint32_t *flags);
-uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
-uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
-uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
-uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
-int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
-uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
-int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
-int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
+uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev);
+uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
+					  struct amdgpu_device *src);
+int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
+					    struct amdgpu_device *src,
+					    bool is_min);
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
 
 /* Read user wptr from a specified user address space with page fault
  * disabled. The memory must be pinned and mapped to the hardware when
@@ -257,43 +259,55 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
 	(&((struct amdgpu_fpriv *)					\
 		((struct drm_file *)(drm_priv))->driver_priv)->vm)
 
-int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
 					struct file *filp, u32 pasid,
 					void **process_info,
 					struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
+					void *drm_priv);
 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
-		struct kgd_dev *kgd, uint64_t va, uint64_t size,
+		struct amdgpu_device *adev, uint64_t va, uint64_t size,
 		void *drm_priv, struct kgd_mem **mem,
 		uint64_t *offset, uint32_t flags);
 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
+		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
 		uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
+		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
+		bool *table_freed);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
+		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
-		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
-int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
+		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
 		struct kgd_mem *mem, void **kptr, uint64_t *size);
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev,
+		struct kgd_mem *mem);
+
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 					    struct dma_fence **ef);
-int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
 					      struct kfd_vm_fault_info *info);
-int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 				      struct dma_buf *dmabuf,
 				      uint64_t va, void *drm_priv,
 				      struct kgd_mem **mem, uint64_t *size,
 				      uint64_t *mmap_offset);
-int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
 				struct tile_config *config);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev);
 #if IS_ENABLED(CONFIG_HSA_AMD)
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+
+/**
+ * @amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
+ *
+ * Allows KFD to release its resources associated with the GEM object.
+ */
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
 void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
 #else
 static inline
@@ -308,7 +322,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 }
 
 static inline
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
 {
 }
 #endif
@@ -320,13 +334,13 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
 #if IS_ENABLED(CONFIG_HSA_AMD)
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf);
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -344,8 +358,7 @@ static inline void kgd2kfd_exit(void)
 }
 
 static inline
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-					unsigned int asic_type, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 {
 	return NULL;
 }
@@ -365,6 +378,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 5a7f680bcb3f..abe93b3ff765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -57,11 +57,6 @@
 		(*dump)[i++][1] = RREG32(addr);		\
 	} while (0)
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
 static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
 {
 	return (struct v9_sdma_mqd *)mqd;
@@ -123,10 +118,9 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
 	return sdma_rlc_reg_offset;
 }
 
-int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
 			     uint32_t __user *wptr, struct mm_struct *mm)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	unsigned long end_jiffies;
@@ -193,11 +187,10 @@ int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
 	return 0;
 }
 
-int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
 			     uint32_t engine_id, uint32_t queue_id,
 			     uint32_t (**dump)[2], uint32_t *n_regs)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
 			engine_id, queue_id);
 	uint32_t i = 0, reg;
@@ -225,9 +218,9 @@ int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
 	return 0;
 }
 
-bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+				void *mqd)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	uint32_t sdma_rlc_rb_cntl;
@@ -244,10 +237,9 @@ bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 	return false;
 }
 
-int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
 				unsigned int utimeout)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v9_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	uint32_t temp;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
index ce08131b7b5f..756c1a5679c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
@@ -20,11 +20,12 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */
 
-int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
 			     uint32_t __user *wptr, struct mm_struct *mm);
-int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
 			     uint32_t engine_id, uint32_t queue_id,
 			     uint32_t (**dump)[2], uint32_t *n_regs);
-bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+				void *mqd);
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
 				unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 960acf68150a..7b7f4b2764c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -39,37 +39,26 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	mutex_lock(&adev->srbm_mutex);
 	nv_grbm_select(adev, mec, pipe, queue, vmid);
 }
 
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	nv_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }
 
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, queue_id, 0);
+	lock_srbm(adev, mec, pipe, queue_id, 0);
 }
 
 static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -81,33 +70,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
 	return 1ull << bit;
 }
 
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
 {
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
 					uint32_t sh_mem_config,
 					uint32_t sh_mem_ape1_base,
 					uint32_t sh_mem_ape1_limit,
 					uint32_t sh_mem_bases)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);
 
 	WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
 	WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
 	/* APE1 no longer exists on GFX9 */
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
 					unsigned int vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	/*
 	 * We have to assume that there is no outstanding mapping.
 	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -150,22 +135,21 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
 * but still works
 */
 
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t mec;
 	uint32_t pipe;
 
 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, 0, 0);
+	lock_srbm(adev, mec, pipe, 0, 0);
 
 	WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
 		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
 		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 
 	return 0;
 }
@@ -218,12 +202,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
 	return (struct v10_sdma_mqd *)mqd;
 }
 
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
-			uint32_t queue_id, uint32_t __user *wptr,
-			uint32_t wptr_shift, uint32_t wptr_mask,
-			struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t __user *wptr, uint32_t wptr_shift,
+			uint32_t wptr_mask, struct mm_struct *mm)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_compute_mqd *m;
 	uint32_t *mqd_hqd;
 	uint32_t reg, hqd_base, data;
@@ -231,7 +214,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	m = get_mqd(mqd);
 
 	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
 	mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -296,16 +279,15 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
 	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
 
-	release_queue(kgd);
+	release_queue(adev);
 
 	return 0;
 }
 
-static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
 			    uint32_t pipe_id, uint32_t queue_id,
 			    uint32_t doorbell_off)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
 	struct v10_compute_mqd *m;
 	uint32_t mec, pipe;
@@ -313,7 +295,7 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
 
 	m = get_mqd(mqd);
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -349,16 +331,15 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
 
 out_unlock:
 	spin_unlock(&adev->gfx.kiq.ring_lock);
-	release_queue(kgd);
+	release_queue(adev);
 
 	return r;
 }
 
-static int kgd_hqd_dump(struct kgd_dev *kgd,
+static int kgd_hqd_dump(struct amdgpu_device *adev,
 			uint32_t pipe_id, uint32_t queue_id,
 			uint32_t (**dump)[2], uint32_t *n_regs)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t i = 0, reg;
 #define HQD_N_REGS 56
 #define DUMP_REG(addr) do {				\
@@ -372,13 +353,13 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
 	if (*dump == NULL)
 		return -ENOMEM;
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
 	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
 		DUMP_REG(reg);
 
-	release_queue(kgd);
+	release_queue(adev);
 
 	WARN_ON_ONCE(i != HQD_N_REGS);
 	*n_regs = i;
@@ -386,10 +367,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
 	return 0;
 }
 
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
 			     uint32_t __user *wptr, struct mm_struct *mm)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	unsigned long end_jiffies;
@@ -456,11 +436,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
 	return 0;
 }
 
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
 			     uint32_t engine_id, uint32_t queue_id,
 			     uint32_t (**dump)[2], uint32_t *n_regs)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
 			engine_id, queue_id);
 	uint32_t i = 0, reg;
@@ -488,15 +467,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
 	return 0;
 }
 
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
-				uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+				uint64_t queue_address, uint32_t pipe_id,
+				uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t act;
 	bool retval = false;
 	uint32_t low, high;
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 	act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
 	if (act) {
 		low = lower_32_bits(queue_address >> 8);
@@ -506,13 +485,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
 		    high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
 			retval = true;
 	}
-	release_queue(kgd);
+	release_queue(adev);
 	return retval;
 }
 
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	uint32_t sdma_rlc_rb_cntl;
@@ -529,12 +507,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
 	return false;
 }
 
-static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
 				enum kfd_preempt_type reset_type,
 				unsigned int utimeout, uint32_t pipe_id,
 				uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	enum hqd_dequeue_request_type type;
 	unsigned long end_jiffies;
 	uint32_t temp;
@@ -548,7 +525,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
 	int retry;
 #endif
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	if (m->cp_hqd_vmid == 0)
 		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -633,20 +610,19 @@ loop:
 			break;
 		if (time_after(jiffies, end_jiffies)) {
 			pr_err("cp queue preemption time out.\n");
-			release_queue(kgd);
+			release_queue(adev);
 			return -ETIME;
 		}
 		usleep_range(500, 1000);
 	}
 
-	release_queue(kgd);
+	release_queue(adev);
 	return 0;
 }
 
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
 				unsigned int utimeout)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	uint32_t temp;
@@ -683,11 +659,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 	return 0;
 }
 
-static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
 					uint8_t vmid, uint16_t *p_pasid)
 {
 	uint32_t value;
-	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 
 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
 		     + vmid);
@@ -696,12 +671,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
 }
 
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
+static int kgd_address_watch_disable(struct amdgpu_device *adev)
 {
 	return 0;
 }
 
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
+static int kgd_address_watch_execute(struct amdgpu_device *adev,
 					unsigned int watch_point_id,
 					uint32_t cntl_val,
 					uint32_t addr_hi,
@@ -710,11 +685,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd,
 	return 0;
 }
 
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
 					uint32_t gfx_index_val,
 					uint32_t sq_cmd)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t data = 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -735,18 +709,16 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
 	return 0;
 }
 
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
+static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev,
 					unsigned int watch_point_id,
 					unsigned int reg_offset)
 {
 	return 0;
 }
 
-static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint64_t page_table_base)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+		uint32_t vmid, uint64_t page_table_base)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
 		pr_err("trying to set page table base for wrong VMID %u\n",
 		       vmid);
@@ -757,12 +729,10 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
-static void program_trap_handler_settings(struct kgd_dev *kgd,
+static void program_trap_handler_settings(struct amdgpu_device *adev,
 		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);
 
 	/*
 	 * Program TBA registers
@@ -781,7 +751,7 @@ static void program_trap_handler_settings(struct kgd_dev *kgd,
 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
 			upper_32_bits(tma_addr >> 8));
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
 const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index dac0d751d5af..1f37d3574001 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -38,37 +38,26 @@ enum hqd_dequeue_request_type {
 	SAVE_WAVES
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	mutex_lock(&adev->srbm_mutex);
 	nv_grbm_select(adev, mec, pipe, queue, vmid);
 }
 
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	nv_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }
 
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, queue_id, 0);
+	lock_srbm(adev, mec, pipe, queue_id, 0);
 }
 
 static uint64_t get_queue_mask(struct amdgpu_device *adev,
@@ -80,34 +69,30 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev,
 	return 1ull << bit;
 }
 
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
 {
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
-static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
+static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid,
 					uint32_t sh_mem_config,
 					uint32_t sh_mem_ape1_base,
 					uint32_t sh_mem_ape1_limit,
 					uint32_t sh_mem_bases)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);
 
 	WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
 	WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
 	/* APE1 no longer exists on GFX9 */
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
 /* ATC is defeatured on Sienna_Cichlid */
-static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
+static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid,
 					unsigned int vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
 
 	/* Mapping vmid to pasid also for IH block */
@@ -118,22 +103,21 @@ static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
 	return 0;
 }
 
-static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
+static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t mec;
 	uint32_t pipe;
 
 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, 0, 0);
+	lock_srbm(adev, mec, pipe, 0, 0);
 
 	WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
 		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
 		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 
 	return 0;
 }
@@ -188,12 +172,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
 	return (struct v10_sdma_mqd *)mqd;
 }
 
-static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
-			uint32_t queue_id, uint32_t __user *wptr,
-			uint32_t wptr_shift, uint32_t wptr_mask,
-			struct mm_struct *mm)
+static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t __user *wptr, uint32_t wptr_shift,
+			uint32_t wptr_mask, struct mm_struct *mm)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_compute_mqd *m;
 	uint32_t *mqd_hqd;
 	uint32_t reg, hqd_base, data;
@@ -201,7 +184,7 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	m = get_mqd(mqd);
 
 	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	/* HIQ is set during driver init period with vmid set to 0*/
 	if (m->cp_hqd_vmid == 0) {
@@ -281,16 +264,15 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
 	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
 
-	release_queue(kgd);
+	release_queue(adev);
 
 	return 0;
 }
 
-static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
 			    uint32_t pipe_id, uint32_t queue_id,
 			    uint32_t doorbell_off)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
 	struct v10_compute_mqd *m;
 	uint32_t mec, pipe;
@@ -298,7 +280,7 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
 
 	m = get_mqd(mqd);
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
@@ -334,16 +316,15 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
 
 out_unlock:
 	spin_unlock(&adev->gfx.kiq.ring_lock);
-	release_queue(kgd);
+	release_queue(adev);
 
 	return r;
 }
 
-static int hqd_dump_v10_3(struct kgd_dev *kgd,
+static int hqd_dump_v10_3(struct amdgpu_device *adev,
 			uint32_t pipe_id, uint32_t queue_id,
 			uint32_t (**dump)[2], uint32_t *n_regs)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t i = 0, reg;
 #define HQD_N_REGS 56
 #define DUMP_REG(addr) do {				\
@@ -357,13 +338,13 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd,
 	if (*dump == NULL)
 		return -ENOMEM;
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
 	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
 		DUMP_REG(reg);
 
-	release_queue(kgd);
+	release_queue(adev);
 
 	WARN_ON_ONCE(i != HQD_N_REGS);
 	*n_regs = i;
@@ -371,10 +352,9 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd,
 	return 0;
 }
 
-static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd,
 			     uint32_t __user *wptr, struct mm_struct *mm)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	unsigned long end_jiffies;
@@ -441,11 +421,10 @@ static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
 	return 0;
 }
 
-static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
+static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
 			     uint32_t engine_id, uint32_t queue_id,
 			     uint32_t (**dump)[2], uint32_t *n_regs)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
 			engine_id, queue_id);
 	uint32_t i = 0, reg;
@@ -473,15 +452,15 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
 	return 0;
 }
 
-static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
-				uint32_t pipe_id, uint32_t queue_id)
+static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev,
+				uint64_t queue_address, uint32_t pipe_id,
+				uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t act;
 	bool retval = false;
 	uint32_t low, high;
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 	act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
 	if (act) {
 		low = lower_32_bits(queue_address >> 8);
@@ -491,13 +470,13 @@ static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
 		    high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
 			retval = true;
 	}
-	release_queue(kgd);
+	release_queue(adev);
 	return retval;
 }
 
-static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
+static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev,
+				void *mqd)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	uint32_t sdma_rlc_rb_cntl;
@@ -514,18 +493,17 @@ static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
 	return false;
 }
 
-static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
 				enum kfd_preempt_type reset_type,
 				unsigned int utimeout, uint32_t pipe_id,
 				uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	enum hqd_dequeue_request_type type;
 	unsigned long end_jiffies;
 	uint32_t temp;
 	struct v10_compute_mqd *m = get_mqd(mqd);
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	if (m->cp_hqd_vmid == 0)
 		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
@@ -555,20 +533,19 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
 		if (time_after(jiffies, end_jiffies)) {
 			pr_err("cp queue pipe %d queue %d preemption failed\n",
 					pipe_id, queue_id);
-			release_queue(kgd);
+			release_queue(adev);
 			return -ETIME;
 		}
 		usleep_range(500, 1000);
 	}
 
-	release_queue(kgd);
+	release_queue(adev);
 	return 0;
 }
 
-static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
+static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
 				unsigned int utimeout)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct v10_sdma_mqd *m;
 	uint32_t sdma_rlc_reg_offset;
 	uint32_t temp;
@@ -606,12 +583,12 @@ static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
 }
 
-static int address_watch_disable_v10_3(struct kgd_dev *kgd)
+static int address_watch_disable_v10_3(struct amdgpu_device *adev)
 {
 	return 0;
 }
 
-static int address_watch_execute_v10_3(struct kgd_dev *kgd,
+static int address_watch_execute_v10_3(struct amdgpu_device *adev,
 					unsigned int watch_point_id,
 					uint32_t cntl_val,
 					uint32_t addr_hi,
@@ -620,11 +597,10 @@ static int address_watch_execute_v10_3(struct kgd_dev *kgd,
 	return 0;
 }
 
-static int wave_control_execute_v10_3(struct kgd_dev *kgd,
+static int wave_control_execute_v10_3(struct amdgpu_device *adev,
 					uint32_t gfx_index_val,
 					uint32_t sq_cmd)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t data = 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -645,28 +621,24 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd,
 	return 0;
 }
 
-static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd,
+static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev,
 					unsigned int watch_point_id,
 					unsigned int reg_offset)
 {
 	return 0;
 }
 
-static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid,
-		uint64_t page_table_base)
+static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
+		uint32_t vmid, uint64_t page_table_base)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	/* SDMA is on gfxhub as well for Navi1* series */
 	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
-static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
+static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev,
 			uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);
 
 	/*
 	 * Program TBA registers
@@ -685,15 +657,14 @@ static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
 			upper_32_bits(tma_addr >> 8));
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
 #if 0
-uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
+uint32_t enable_debug_trap_v10_3(struct amdgpu_device *adev,
 				uint32_t trap_debug_wave_launch_mode,
 				uint32_t vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t data = 0;
 	uint32_t orig_wave_cntl_value;
 	uint32_t orig_stall_vmid;
@@ -720,10 +691,8 @@ uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
 	return 0;
 }
 
-uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
+uint32_t disable_debug_trap_v10_3(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	mutex_lock(&adev->grbm_idx_mutex);
 
 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
@@ -733,11 +702,10 @@ uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
 	return 0;
 }
 
-uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
+uint32_t set_wave_launch_trap_override_v10_3(struct amdgpu_device *adev,
 						uint32_t trap_override,
 						uint32_t trap_mask)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t data = 0;
 
 	mutex_lock(&adev->grbm_idx_mutex);
@@ -762,11 +730,10 @@ uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
 	return 0;
 }
 
-uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
+uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev,
 					uint8_t wave_launch_mode,
 					uint32_t vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t data = 0;
 	bool is_stall_mode;
 	bool is_mode_set;
@@ -805,16 +772,14 @@ uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
 *	sem_rearm_wait_time      -- Wait Count for Semaphore re-arm.
 *	deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
 */
-void get_iq_wait_times_v10_3(struct kgd_dev *kgd,
+void get_iq_wait_times_v10_3(struct amdgpu_device *adev,
 					uint32_t *wait_times)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
 }
 
-void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd,
+void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev,
 		uint32_t wait_times,
 		uint32_t grace_period,
 		uint32_t *reg_offset,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b91d27e39bad..36528dad7684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -82,68 +82,54 @@ union TCP_WATCH_CNTL_BITS {
 	float f32All;
 };
 
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
-	return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
 			uint32_t queue, uint32_t vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
 
 	mutex_lock(&adev->srbm_mutex);
 	WREG32(mmSRBM_GFX_CNTL, value);
 }
 
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	WREG32(mmSRBM_GFX_CNTL, 0);
 	mutex_unlock(&adev->srbm_mutex);
 }
 
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
 			uint32_t queue_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, queue_id, 0);
+	lock_srbm(adev, mec, pipe, queue_id, 0);
 }
 
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
 {
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
 					uint32_t sh_mem_config,
 					uint32_t sh_mem_ape1_base,
 					uint32_t sh_mem_ape1_limit,
 					uint32_t sh_mem_bases)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
-	lock_srbm(kgd, 0, 0, 0, vmid);
+	lock_srbm(adev, 0, 0, 0, vmid);
 
 	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
 	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
 	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
 	WREG32(mmSH_MEM_BASES, sh_mem_bases);
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 }
 
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
 					unsigned int vmid)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
 	/*
 	 * We have to assume that there is no outstanding mapping.
 	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -165,21 +151,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid,
 	return 0;
 }
 
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	uint32_t mec;
 	uint32_t pipe;
 
 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
 
-	lock_srbm(kgd, mec, pipe, 0, 0);
+	lock_srbm(adev, mec, pipe, 0, 0);
 
 	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
 			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
 
-	unlock_srbm(kgd);
+	unlock_srbm(adev);
 
 	return 0;
 }
@@ -207,12 +192,11 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
 	return (struct cik_sdma_rlc_registers *)mqd;
 }
 
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
-			uint32_t queue_id, uint32_t __user *wptr,
-			uint32_t wptr_shift, uint32_t wptr_mask,
-			struct mm_struct *mm)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+			uint32_t pipe_id, uint32_t queue_id,
+			uint32_t __user *wptr, uint32_t wptr_shift,
+			uint32_t wptr_mask, struct mm_struct *mm)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct cik_mqd *m;
 	uint32_t *mqd_hqd;
 	uint32_t reg, wptr_val, data;
@@ -220,7 +204,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 
 	m = get_mqd(mqd);
 
-	acquire_queue(kgd, pipe_id, queue_id);
+	acquire_queue(adev, pipe_id, queue_id);
 
 	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
 	mqd_hqd = &m->cp_mqd_base_addr_lo;
@@ -239,25 +223,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 	 * release srbm_mutex to avoid circular dependency between
 	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/ - release_queue(kgd); + release_queue(adev); valid_wptr = read_user_wptr(mm, wptr, wptr_val); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (valid_wptr) WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32(mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS (35+4) #define DUMP_REG(addr) do { \ @@ -271,7 +254,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1); @@ -281,7 +264,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -289,10 +272,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; unsigned long end_jiffies; uint32_t sdma_rlc_reg_offset; @@ -345,11 +327,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_CIK_SDMA_QUEUE_OFFSET; uint32_t i = 0, reg; @@ -372,15 +353,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -390,13 +371,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -412,12 +392,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device 
*adev = get_amdgpu_device(kgd); uint32_t temp; enum hqd_dequeue_request_type type; unsigned long flags, end_jiffies; @@ -426,7 +405,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0); switch (reset_type) { @@ -504,20 +483,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -551,9 +529,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); union TCP_WATCH_CNTL_BITS cntl; unsigned int i; @@ -571,13 +548,12 @@ static int kgd_address_watch_disable(struct kgd_dev *kgd) return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); union TCP_WATCH_CNTL_BITS cntl; cntl.u32All = cntl_val; @@ -602,11 +578,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data; mutex_lock(&adev->grbm_idx_mutex); @@ -627,18 +602,17 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; @@ -646,21 +620,17 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static void set_scratch_backing_va(struct kgd_dev *kgd, +static void set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); - unlock_srbm(kgd); + unlock_srbm(adev); } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); 
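/*
 * The conversion this series applies everywhere, shown as a minimal sketch
 * (function and register names here are illustrative, not quoted from the
 * patch): each KFD entry point used to take an opaque struct kgd_dev handle
 * and immediately cast it back to the device it actually was:
 *
 *	static void example_op(struct kgd_dev *kgd)
 *	{
 *		struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 *
 *		WREG32(mmSRBM_GFX_CNTL, 0);	/* operate on adev */
 *	}
 *
 * Passing struct amdgpu_device directly, as the hunks above and below do,
 * deletes the cast helper and one level of indirection; the register
 * programming in the function bodies is unchanged.
 */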
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID\n"); return; @@ -676,10 +646,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, * @vmid: vmid pointer * read vmid from register (CIK). */ -static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd) +static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 5ce0ce704a21..52832cd69a93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -39,68 +39,54 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue); mutex_lock(&adev->srbm_mutex); WREG32(mmSRBM_GFX_CNTL, value); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - WREG32(mmSRBM_GFX_CNTL, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_MEM_CONFIG, sh_mem_config); WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base); WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit); WREG32(mmSH_MEM_BASES, sh_mem_bases); - unlock_srbm(kgd); + unlock_srbm(adev); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -123,21 +109,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, return 0; } -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -165,12 +150,11 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) return (struct vi_sdma_mqd *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_mqd *m; uint32_t *mqd_hqd; uint32_t reg, wptr_val, data; @@ -178,7 +162,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HIQ is set during driver init period with vmid set to 0*/ if (m->cp_hqd_vmid == 0) { @@ -206,7 +190,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * on ASICs that do not support context-save. * EOP writes/reads can start anywhere in the ring. */ - if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) { + if (adev->asic_type != CHIP_TONGA) { WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr); WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr); WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem); @@ -226,25 +210,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. 
*/ - release_queue(kgd); + release_queue(adev); valid_wptr = read_user_wptr(mm, wptr, wptr_val); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (valid_wptr) WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32(mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS (54+4) #define DUMP_REG(addr) do { \ @@ -258,7 +241,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1); @@ -268,7 +251,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -276,10 +259,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; unsigned long end_jiffies; uint32_t sdma_rlc_reg_offset; @@ -331,11 +313,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_VI_SDMA_QUEUE_OFFSET; uint32_t i = 0, reg; @@ -367,15 +348,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -385,13 +366,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -407,12 +387,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = 
get_amdgpu_device(kgd); uint32_t temp; enum hqd_dequeue_request_type type; unsigned long flags, end_jiffies; @@ -422,7 +401,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0); @@ -502,20 +481,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -549,11 +527,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; @@ -561,12 +538,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -575,11 +552,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -600,28 +576,24 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_scratch_backing_va(struct kgd_dev *kgd, +static void set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); - unlock_srbm(kgd); + unlock_srbm(adev); } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID\n"); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index bcc1cbeb8799..ddfe7aff919d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -46,37 +46,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, mec, pipe, queue, vmid); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } static uint64_t get_queue_mask(struct amdgpu_device *adev, @@ -88,33 +77,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(kgd); + unlock_srbm(adev); } -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -171,22 +156,21 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, * but still works */ -int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -233,19 +217,18 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v9_sdma_mqd *)mqd; } -int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_mqd *m; uint32_t *mqd_hqd; uint32_t reg, hqd_base, data; m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -308,16 +291,15 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); - release_queue(kgd); + release_queue(adev); return 0; } -int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; struct v9_mqd *m; uint32_t mec, pipe; @@ -325,7 +307,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -361,16 +343,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq.ring_lock); - release_queue(kgd); + release_queue(adev); return r; } -int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, +int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -384,13 +365,13 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -398,10 +379,9 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void 
*mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -468,11 +448,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -500,15 +479,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); if (act) { low = lower_32_bits(queue_address >> 8); @@ -518,13 +497,12 @@ bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -541,12 +519,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); enum hqd_dequeue_request_type type; unsigned long end_jiffies; uint32_t temp; @@ -555,7 +532,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -584,20 +561,19 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -634,11 +610,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct 
amdgpu_device *) kgd; value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); @@ -647,12 +622,12 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd) +int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev) { return 0; } -int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -661,11 +636,10 @@ int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, return 0; } -int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -686,18 +660,16 @@ int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, return 0; } -uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, +uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, +void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", vmid); @@ -804,7 +776,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * * Reading registers referenced above involves programming GRBM appropriately */ -void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, int *pasid_wave_cnt, int *max_waves_per_cu) { int qidx; @@ -818,10 +790,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, int pasid_tmp; int max_queue_cnt; int vmid_wave_cnt = 0; - struct amdgpu_device *adev; DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); - adev = get_amdgpu_device(kgd); lock_spi_csq_mutexes(adev); soc15_grbm_select(adev, 1, 0, 0, 0); @@ -882,12 +852,10 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, adev->gfx.cu_info.max_waves_per_simd; } -void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, +void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); /* * Program TBA registers @@ -905,7 +873,7 @@ void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), upper_32_bits(tma_addr >> 8)); - unlock_srbm(kgd); + unlock_srbm(adev); } const struct kfd2kgd_calls gfx_v9_kfd2kgd = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index c63591106879..24be49df26fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -22,48 +22,49 @@ -void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, 
uint32_t sh_mem_bases); -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid); -int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id); +int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); -int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off); -int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, +int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); -bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, +bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id); +int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); -int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd); -int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev); +int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo); -int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); -uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, +uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset); -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, +void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, int *pasid_wave_cnt, int *max_waves_per_cu); -void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, +void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 2d6b2d77b738..5df89a295177 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -60,12 +60,6 @@ static const char * const domain_bit_to_string[] = { static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); - -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - static bool kfd_mem_is_attached(struct amdgpu_vm *avm, struct kgd_mem *mem) { @@ -126,8 +120,19 @@ static size_t 
amdgpu_amdkfd_acc_size(uint64_t size) PAGE_ALIGN(size); } +/** + * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size + * of buffer including any reserved for control structures + * + * @adev: Device to which the allocated BO belongs + * @size: Size of buffer, in bytes, encapsulated by BO. This should be + * equivalent to amdgpu_bo_size(BO) + * @alloc_flag: Flag used in allocating the BO + * + * Return: 0 on success, -ENOMEM otherwise + */ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain, bool sg) + uint64_t size, u32 alloc_flag) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); @@ -137,20 +142,24 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, acc_size = amdgpu_amdkfd_acc_size(size); vram_needed = 0; - if (domain == AMDGPU_GEM_DOMAIN_GTT) { - /* TTM GTT memory */ + if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { system_mem_needed = acc_size + size; ttm_mem_needed = acc_size + size; - } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { - /* Userptr */ + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + system_mem_needed = acc_size; + ttm_mem_needed = acc_size; + vram_needed = size; + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { system_mem_needed = acc_size + size; ttm_mem_needed = acc_size; - } else { - /* VRAM and SG */ + } else if (alloc_flag & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { system_mem_needed = acc_size; ttm_mem_needed = acc_size; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) - vram_needed = size; + } else { + pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); + return -ENOMEM; } spin_lock(&kfd_mem_limit.mem_limit_lock); @@ -166,62 +175,72 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, (adev->kfd.vram_used + vram_needed > adev->gmc.real_vram_size - reserved_for_pt)) { ret = -ENOMEM; - } else { - kfd_mem_limit.system_mem_used += system_mem_needed; - kfd_mem_limit.ttm_mem_used += ttm_mem_needed; - adev->kfd.vram_used += vram_needed; + goto release; } + /* Update memory accounting by decreasing available system + * memory, TTM memory and GPU memory as computed above + */ + adev->kfd.vram_used += vram_needed; + kfd_mem_limit.system_mem_used += system_mem_needed; + kfd_mem_limit.ttm_mem_used += ttm_mem_needed; + +release: spin_unlock(&kfd_mem_limit.mem_limit_lock); return ret; } static void unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain, bool sg) + uint64_t size, u32 alloc_flag) { size_t acc_size; acc_size = amdgpu_amdkfd_acc_size(size); spin_lock(&kfd_mem_limit.mem_limit_lock); - if (domain == AMDGPU_GEM_DOMAIN_GTT) { + + if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { kfd_mem_limit.system_mem_used -= (acc_size + size); kfd_mem_limit.ttm_mem_used -= (acc_size + size); - } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + kfd_mem_limit.system_mem_used -= acc_size; + kfd_mem_limit.ttm_mem_used -= acc_size; + adev->kfd.vram_used -= size; + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { kfd_mem_limit.system_mem_used -= (acc_size + size); kfd_mem_limit.ttm_mem_used -= acc_size; - } else { + } else if (alloc_flag & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { kfd_mem_limit.system_mem_used -= acc_size; kfd_mem_limit.ttm_mem_used -= acc_size; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - adev->kfd.vram_used -= size; -
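/*
 * Condensed view of the accounting in the reserve/unreserve pair of this
 * hunk (acc_size is the control-structure overhead computed by
 * amdgpu_amdkfd_acc_size(); this table is a summary, not code from the
 * patch):
 *
 *	GTT:            system_mem += acc_size + size; ttm_mem += acc_size + size
 *	VRAM:           system_mem += acc_size;        ttm_mem += acc_size; vram += size
 *	USERPTR:        system_mem += acc_size + size; ttm_mem += acc_size
 *	DOORBELL/MMIO:  system_mem += acc_size;        ttm_mem += acc_size
 *	anything else:  rejected with -ENOMEM
 *
 * unreserve_mem_limit() must subtract exactly what was added for each flag,
 * which is what the WARN_ONCE underflow checks below guard against.
 */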
WARN_ONCE(adev->kfd.vram_used < 0, - "kfd VRAM memory accounting unbalanced"); - } + } else { + pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); + goto release; } - WARN_ONCE(kfd_mem_limit.system_mem_used < 0, - "kfd system memory accounting unbalanced"); + + WARN_ONCE(adev->kfd.vram_used < 0, + "KFD VRAM memory accounting unbalanced"); WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, - "kfd TTM memory accounting unbalanced"); + "KFD TTM memory accounting unbalanced"); + WARN_ONCE(kfd_mem_limit.system_mem_used < 0, + "KFD system memory accounting unbalanced"); +release: spin_unlock(&kfd_mem_limit.mem_limit_lock); } -void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) +void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u32 domain = bo->preferred_domains; - bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); + u32 alloc_flags = bo->kfd_bo->alloc_flags; + u64 size = amdgpu_bo_size(bo); - if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) { - domain = AMDGPU_GEM_DOMAIN_CPU; - sg = false; - } + unreserve_mem_limit(adev, size, alloc_flags); - unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); + kfree(bo->kfd_bo); } - /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's * reservation object. * @@ -563,6 +582,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); sg_free_table(ttm->sg); + kfree(ttm->sg); ttm->sg = NULL; } @@ -643,12 +663,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, if (IS_ERR(gobj)) return PTR_ERR(gobj); - /* Import takes an extra reference on the dmabuf. Drop it now to - * avoid leaking it. We only need the one reference in - * kgd_mem->dmabuf. - */ - dma_buf_put(mem->dmabuf); - *bo = gem_to_amdgpu_bo(gobj); (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; (*bo)->parent = amdgpu_bo_ref(mem->bo); @@ -733,14 +747,19 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, } /* Add BO to VM internal data structures */ + ret = amdgpu_bo_reserve(bo[i], false); + if (ret) { + pr_debug("Unable to reserve BO during memory attach"); + goto unwind; + } attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); + amdgpu_bo_unreserve(bo[i]); if (unlikely(!attachment[i]->bo_va)) { ret = -ENOMEM; pr_err("Failed to add BO object to VM. ret == %d\n", ret); goto unwind; } - attachment[i]->va = va; attachment[i]->pte_flags = get_pte_flags(adev, mem); attachment[i]->adev = adev; @@ -756,7 +775,9 @@ unwind: if (!attachment[i]) continue; if (attachment[i]->bo_va) { + amdgpu_bo_reserve(bo[i], true); amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va); + amdgpu_bo_unreserve(bo[i]); list_del(&attachment[i]->list); } if (bo[i]) @@ -1268,12 +1289,60 @@ create_evict_fence_fail: return ret; } -int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, +/** + * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria + * @bo: Handle of buffer object being pinned + * @domain: Domain into which BO should be pinned + * + * - USERPTR BOs are UNPINNABLE and will return error + * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their + * PIN count incremented. It is valid to PIN a BO multiple times + * + * Return: 0 if successfully pinned, non-zero otherwise.
+ */ +static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) +{ + int ret = 0; + + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret)) + return ret; + + ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0); + if (ret) + pr_err("Error in Pinning BO to domain: %d\n", domain); + + amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); + amdgpu_bo_unreserve(bo); + + return ret; +} + +/** + * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria + * @bo: Handle of buffer object being unpinned + * + * - Is an illegal request for USERPTR BOs and is ignored + * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their + * PIN count decremented. Calls to UNPIN must balance calls to PIN + */ +static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) +{ + int ret = 0; + + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret)) + return; + + amdgpu_bo_unpin(bo); + amdgpu_bo_unreserve(bo); +} + +int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, struct file *filp, u32 pasid, void **process_info, struct dma_fence **ef) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; int ret; @@ -1349,12 +1418,12 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } } -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv) +void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, + void *drm_priv) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm; - if (WARN_ON(!kgd || !drm_priv)) + if (WARN_ON(!adev || !drm_priv)) return; avm = drm_priv_to_vm(drm_priv); @@ -1382,17 +1451,16 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) } int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - struct kgd_dev *kgd, uint64_t va, uint64_t size, + struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct drm_gem_object *gobj; + struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1450,7 +1518,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags); if (ret) { pr_debug("Insufficient memory\n"); goto err_reserve_limit; @@ -1496,20 +1564,34 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if (offset) *offset = amdgpu_bo_mmap_offset(bo); + if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { + ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT); + if (ret) { + pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n"); + goto err_pin_bo; + } + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; + } + return 0; allocate_init_user_pages_failed: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); +err_pin_bo: drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: - amdgpu_bo_unref(&bo); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: - unreserve_mem_limit(adev, size, alloc_domain, !!sg); + unreserve_mem_limit(adev, size, flags); err_reserve_limit: mutex_destroy(&(*mem)->lock); - kfree(*mem); +
if (gobj) + drm_gem_object_put(gobj); + else + kfree(*mem); err: if (sg) { sg_free_table(sg); @@ -1519,7 +1601,7 @@ err: } int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, uint64_t *size) { struct amdkfd_process_info *process_info = mem->process_info; @@ -1532,6 +1614,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( bool is_imported = false; mutex_lock(&mem->lock); + + /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */ + if (mem->alloc_flags & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { + amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); + } + mapped_to_gpu_memory = mem->mapped_to_gpu_memory; is_imported = mem->is_imported; mutex_unlock(&mem->lock); @@ -1567,12 +1657,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue)); - ret = unreserve_bo_and_vms(&ctx, false, false); - /* Remove from VM internal data structures */ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) kfd_mem_detach(entry); + ret = unreserve_bo_and_vms(&ctx, false, false); + /* Free the sync object */ amdgpu_sync_free(&mem->sync); @@ -1599,18 +1689,21 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); if (mem->dmabuf) dma_buf_put(mem->dmabuf); - drm_gem_object_put(&mem->bo->tbo.base); mutex_destroy(&mem->lock); - kfree(mem); + + /* If this releases the last reference, it will end up calling + * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why + * this needs to be the last call here. + */ + drm_gem_object_put(&mem->bo->tbo.base); return ret; } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, bool *table_freed) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); int ret; struct amdgpu_bo *bo; @@ -1737,7 +1830,7 @@ out: } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdkfd_process_info *process_info = avm->process_info; @@ -1798,7 +1891,7 @@ out: } int amdgpu_amdkfd_gpuvm_sync_memory( - struct kgd_dev *kgd, struct kgd_mem *mem, bool intr) + struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) { struct amdgpu_sync sync; int ret; @@ -1814,7 +1907,7 @@ int amdgpu_amdkfd_gpuvm_sync_memory( return ret; } -int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, struct kgd_mem *mem, void **kptr, uint64_t *size) { int ret; @@ -1870,12 +1963,20 @@ bo_reserve_failed: return ret; } -int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, - struct kfd_vm_fault_info *mem) +void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev, + struct kgd_mem *mem) { - struct amdgpu_device *adev; + struct amdgpu_bo *bo = mem->bo; - adev = (struct amdgpu_device *)kgd; + amdgpu_bo_reserve(bo, true); + amdgpu_bo_kunmap(bo); + amdgpu_bo_unpin(bo); + amdgpu_bo_unreserve(bo); +} + +int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, + struct kfd_vm_fault_info *mem) +{ if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { *mem =
*adev->gmc.vm_fault_info; mb(); @@ -1884,13 +1985,12 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, return 0; } -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, struct dma_buf *dma_buf, uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct drm_gem_object *obj; struct amdgpu_bo *bo; @@ -2040,19 +2140,26 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, /* Get updated user pages */ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); if (ret) { - pr_debug("%s: Failed to get user pages: %d\n", - __func__, ret); + pr_debug("Failed %d to get user pages\n", ret); + + /* Return -EFAULT bad address error as success. It will + * fail later with a VM fault if the GPU tries to access + * it. Better than hanging indefinitely with stalled + * user mode queues. + * + * Return other error -EBUSY or -ENOMEM to retry restore + */ + if (ret != -EFAULT) + return ret; + } else { - /* Return error -EBUSY or -ENOMEM, retry restore */ - return ret; + /* + * FIXME: Cannot ignore the return code, must hold + * notifier_lock + */ + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); } - /* - * FIXME: Cannot ignore the return code, must hold - * notifier_lock - */ - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); - /* Mark the BO as valid unless it was invalidated * again concurrently. */ @@ -2510,11 +2617,9 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) } /* Returns GPU-specific tiling mode information */ -int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, +int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - config->gb_addr_config = adev->gfx.config.gb_addr_config; config->tile_config_ptr = adev->gfx.config.tile_mode_array; config->num_tile_configs = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 96b7bb13a2dd..12a6b1c99c93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, WREG32(adev->bios_scratch_reg_offset + 3, tmp); } +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2); + + tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; + tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & + ATOM_S2_CURRENT_BL_LEVEL_MASK; + + WREG32(adev->bios_scratch_reg_offset + 2, tmp); +} + bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) { u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8cc0222dba19..27e74b1fc260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 
backlight_level); bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 15c45b2a3983..714178f1b6c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -61,7 +61,7 @@ static void amdgpu_bo_list_free(struct kref *ref) int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, struct drm_amdgpu_bo_list_entry *info, - unsigned num_entries, struct amdgpu_bo_list **result) + size_t num_entries, struct amdgpu_bo_list **result) { unsigned last_entry = 0, first_userptr = num_entries; struct amdgpu_bo_list_entry *array; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index c905a4cfc173..044b41f0bfd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -61,7 +61,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp, struct drm_amdgpu_bo_list_entry *info, - unsigned num_entries, + size_t num_entries, struct amdgpu_bo_list **list); static inline struct amdgpu_bo_list_entry * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index b9c11c2b2885..0de66f59adb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) amdgpu_connector_get_edid(connector); ret = amdgpu_connector_ddc_get_modes(connector); + amdgpu_get_native_mode(connector); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index e7a010b7ca1f..468003583b2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -43,14 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = { [AMDGPU_HW_IP_VCN_JPEG] = 1, }; +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio) +{ + switch (ctx_prio) { + case AMDGPU_CTX_PRIORITY_UNSET: + case AMDGPU_CTX_PRIORITY_VERY_LOW: + case AMDGPU_CTX_PRIORITY_LOW: + case AMDGPU_CTX_PRIORITY_NORMAL: + case AMDGPU_CTX_PRIORITY_HIGH: + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return true; + default: + return false; + } +} + +static enum drm_sched_priority +amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio) +{ + switch (ctx_prio) { + case AMDGPU_CTX_PRIORITY_UNSET: + return DRM_SCHED_PRIORITY_UNSET; + + case AMDGPU_CTX_PRIORITY_VERY_LOW: + return DRM_SCHED_PRIORITY_MIN; + + case AMDGPU_CTX_PRIORITY_LOW: + return DRM_SCHED_PRIORITY_MIN; + + case AMDGPU_CTX_PRIORITY_NORMAL: + return DRM_SCHED_PRIORITY_NORMAL; + + case AMDGPU_CTX_PRIORITY_HIGH: + return DRM_SCHED_PRIORITY_HIGH; + + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return DRM_SCHED_PRIORITY_HIGH; + + /* This should not happen as we sanitized userspace provided priority + * already, WARN if this happens. 
+ */ + default: + WARN(1, "Invalid context priority %d\n", ctx_prio); + return DRM_SCHED_PRIORITY_NORMAL; + } + +} + static int amdgpu_ctx_priority_permit(struct drm_file *filp, - enum drm_sched_priority priority) + int32_t priority) { - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT) + if (!amdgpu_ctx_priority_is_valid(priority)) return -EINVAL; /* NORMAL and below are accessible by everyone */ - if (priority <= DRM_SCHED_PRIORITY_NORMAL) + if (priority <= AMDGPU_CTX_PRIORITY_NORMAL) return 0; if (capable(CAP_SYS_NICE)) @@ -62,26 +109,51 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, return -EACCES; } -static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) +static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio) { switch (prio) { - case DRM_SCHED_PRIORITY_HIGH: - case DRM_SCHED_PRIORITY_KERNEL: + case AMDGPU_CTX_PRIORITY_HIGH: + case AMDGPU_CTX_PRIORITY_VERY_HIGH: return AMDGPU_GFX_PIPE_PRIO_HIGH; default: return AMDGPU_GFX_PIPE_PRIO_NORMAL; } } -static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev, - enum drm_sched_priority prio, - u32 hw_ip) +static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio) { + switch (prio) { + case AMDGPU_CTX_PRIORITY_HIGH: + return AMDGPU_RING_PRIO_1; + case AMDGPU_CTX_PRIORITY_VERY_HIGH: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} + +static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip) +{ + struct amdgpu_device *adev = ctx->adev; + int32_t ctx_prio; unsigned int hw_prio; - hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ? - amdgpu_ctx_sched_prio_to_compute_prio(prio) : - AMDGPU_RING_PRIO_DEFAULT; + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? + ctx->init_priority : ctx->override_priority; + + switch (hw_ip) { + case AMDGPU_HW_IP_COMPUTE: + hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio); + break; + case AMDGPU_HW_IP_VCE: + case AMDGPU_HW_IP_VCN_ENC: + hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio); + break; + default: + hw_prio = AMDGPU_RING_PRIO_DEFAULT; + break; + } + hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0) hw_prio = AMDGPU_RING_PRIO_DEFAULT; @@ -89,15 +161,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev, return hw_prio; } + static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, - const u32 ring) + const u32 ring) { struct amdgpu_device *adev = ctx->adev; struct amdgpu_ctx_entity *entity; struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; unsigned num_scheds = 0; + int32_t ctx_prio; unsigned int hw_prio; - enum drm_sched_priority priority; + enum drm_sched_priority drm_prio; int r; entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), @@ -105,10 +179,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, if (!entity) return -ENOMEM; + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? + ctx->init_priority : ctx->override_priority; entity->sequence = 1; - priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 
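/* [Editor's aside, not part of the patch] With contexts now carrying raw
 * int32_t AMDGPU_CTX_PRIORITY_* values, validation happens once at the uapi
 * boundary and the translation to the coarser drm_sched enum happens only
 * when an entity is initialised. A minimal usage sketch for a
 * userspace-supplied value prio:
 *
 *     if (!amdgpu_ctx_priority_is_valid(prio))
 *         prio = AMDGPU_CTX_PRIORITY_NORMAL;   // tolerate garbage, as the ioctl does
 *     drm_prio = amdgpu_ctx_to_drm_sched_prio(prio);
 *
 * Keeping int32_t internally preserves distinctions such as HIGH vs
 * VERY_HIGH, which both collapse to DRM_SCHED_PRIORITY_HIGH in the enum.
 */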
- ctx->init_priority : ctx->override_priority; - hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip); + hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip); + drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio); hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM); scheds = adev->gpu_sched[hw_ip][hw_prio].sched; @@ -124,7 +199,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip, num_scheds = 1; } - r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, + r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds, &ctx->guilty); if (r) goto error_free_entity; @@ -139,7 +214,7 @@ error_free_entity: } static int amdgpu_ctx_init(struct amdgpu_device *adev, - enum drm_sched_priority priority, + int32_t priority, struct drm_file *filp, struct amdgpu_ctx *ctx) { @@ -161,7 +236,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->reset_counter_query = ctx->reset_counter; ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); ctx->init_priority = priority; - ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; + ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET; return 0; } @@ -234,7 +309,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance, static int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *filp, - enum drm_sched_priority priority, + int32_t priority, uint32_t *id) { struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; @@ -397,19 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, { int r; uint32_t id; - enum drm_sched_priority priority; + int32_t priority; union drm_amdgpu_ctx *args = data; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_fpriv *fpriv = filp->driver_priv; id = args->in.ctx_id; - r = amdgpu_to_sched_priority(args->in.priority, &priority); + priority = args->in.priority; /* For backwards compatibility reasons, we need to accept * ioctls with garbage in the priority field */ - if (r == -EINVAL) - priority = DRM_SCHED_PRIORITY_NORMAL; + if (!amdgpu_ctx_priority_is_valid(priority)) + priority = AMDGPU_CTX_PRIORITY_NORMAL; switch (args->in.op) { case AMDGPU_CTX_OP_ALLOC_CTX: @@ -515,9 +590,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, } static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, - struct amdgpu_ctx_entity *aentity, - int hw_ip, - enum drm_sched_priority priority) + struct amdgpu_ctx_entity *aentity, + int hw_ip, + int32_t priority) { struct amdgpu_device *adev = ctx->adev; unsigned int hw_prio; @@ -525,12 +600,12 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, unsigned num_scheds; /* set sw priority */ - drm_sched_entity_set_priority(&aentity->entity, priority); + drm_sched_entity_set_priority(&aentity->entity, + amdgpu_ctx_to_drm_sched_prio(priority)); /* set hw priority */ if (hw_ip == AMDGPU_HW_IP_COMPUTE) { - hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, - AMDGPU_HW_IP_COMPUTE); + hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip); hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX); scheds = adev->gpu_sched[hw_ip][hw_prio].sched; num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds; @@ -540,14 +615,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, } void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum drm_sched_priority priority) + int32_t priority) { - enum drm_sched_priority ctx_prio; + int32_t ctx_prio; unsigned i, j; ctx->override_priority = priority; - ctx_prio = (ctx->override_priority == 
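/* [Editor's aside, not part of the patch] hw_ip in the helper above comes
 * from userspace, so it is clamped with array_index_nospec() before being
 * used to index adev->gpu_sched. The general Spectre-v1 pattern from
 * <linux/nospec.h>:
 *
 *     if (idx >= ARRAY_SIZE(table))
 *         return -EINVAL;                                // architectural bounds check
 *     idx = array_index_nospec(idx, ARRAY_SIZE(table));  // plus speculative clamp
 *     val = table[idx];
 *
 * The clamp is a no-op architecturally but stops the CPU from speculatively
 * reading out of bounds with an attacker-controlled index.
 */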
DRM_SCHED_PRIORITY_UNSET) ? + ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index 14db16bc3322..a44b8b8ed39c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -47,8 +47,8 @@ struct amdgpu_ctx { spinlock_t ring_lock; struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM]; bool preamble_presented; - enum drm_sched_priority init_priority; - enum drm_sched_priority override_priority; + int32_t init_priority; + int32_t override_priority; struct mutex lock; atomic_t guilty; unsigned long ras_counter_ce; @@ -75,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct drm_sched_entity *entity, uint64_t seq); -void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum drm_sched_priority priority); +bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio); +void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 277128846dd1..164d6a9e9fbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -27,7 +27,6 @@ #include <linux/pci.h> #include <linux/uaccess.h> #include <linux/pm_runtime.h> -#include <linux/poll.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -36,87 +35,10 @@ #include "amdgpu_rap.h" #include "amdgpu_securedisplay.h" #include "amdgpu_fw_attestation.h" - -int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev) -{ -#if defined(CONFIG_DEBUG_FS) - unsigned long timeout = 600 * HZ; - int ret; - - wake_up_interruptible(&adev->autodump.gpu_hang); - - ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout); - if (ret == 0) { - pr_err("autodump: timeout, move on to gpu recovery\n"); - return -ETIMEDOUT; - } -#endif - return 0; -} +#include "amdgpu_umr.h" #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file) -{ - struct amdgpu_device *adev = inode->i_private; - int ret; - - file->private_data = adev; - - ret = down_read_killable(&adev->reset_sem); - if (ret) - return ret; - - if (adev->autodump.dumping.done) { - reinit_completion(&adev->autodump.dumping); - ret = 0; - } else { - ret = -EBUSY; - } - - up_read(&adev->reset_sem); - - return ret; -} - -static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file) -{ - struct amdgpu_device *adev = file->private_data; - - complete_all(&adev->autodump.dumping); - return 0; -} - -static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table) -{ - struct amdgpu_device *adev = file->private_data; - - poll_wait(file, &adev->autodump.gpu_hang, poll_table); - - if (amdgpu_in_reset(adev)) - return POLLIN | POLLRDNORM | POLLWRNORM; - - return 0; -} - -static const struct file_operations autodump_debug_fops = { - .owner = THIS_MODULE, - .open = amdgpu_debugfs_autodump_open, - .poll = amdgpu_debugfs_autodump_poll, - .release = amdgpu_debugfs_autodump_release, -}; - -static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev) -{ - 
init_completion(&adev->autodump.dumping); - complete_all(&adev->autodump.dumping); - init_waitqueue_head(&adev->autodump.gpu_hang); - - debugfs_create_file("amdgpu_autodump", 0600, - adev_to_drm(adev)->primary->debugfs_root, - adev, &autodump_debug_fops); -} - /** * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes * @@ -279,6 +201,145 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos); } +static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd; + + rd = kzalloc(sizeof *rd, GFP_KERNEL); + if (!rd) + return -ENOMEM; + rd->adev = file_inode(file)->i_private; + file->private_data = rd; + mutex_init(&rd->lock); + + return 0; +} + +static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file) +{ + struct amdgpu_debugfs_regs2_data *rd = file->private_data; + mutex_destroy(&rd->lock); + kfree(file->private_data); + return 0; +} + +static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en) +{ + struct amdgpu_debugfs_regs2_data *rd = f->private_data; + struct amdgpu_device *adev = rd->adev; + ssize_t result = 0; + int r; + uint32_t value; + + if (size & 0x3 || offset & 0x3) + return -EINVAL; + + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + r = amdgpu_virt_enable_access_debugfs(adev); + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + return r; + } + + mutex_lock(&rd->lock); + + if (rd->id.use_grbm) { + if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) || + (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + amdgpu_virt_disable_access_debugfs(adev); + mutex_unlock(&rd->lock); + return -EINVAL; + } + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se, + rd->id.grbm.sh, + rd->id.grbm.instance); + } + + if (rd->id.use_srbm) { + mutex_lock(&adev->srbm_mutex); + amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe, + rd->id.srbm.queue, rd->id.srbm.vmid); + } + + if (rd->id.pg_lock) + mutex_lock(&adev->pm.mutex); + + while (size) { + if (!write_en) { + value = RREG32(offset >> 2); + r = put_user(value, (uint32_t *)buf); + } else { + r = get_user(value, (uint32_t *)buf); + if (!r) + amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value); + } + if (r) { + result = r; + goto end; + } + offset += 4; + size -= 4; + result += 4; + buf += 4; + } +end: + if (rd->id.use_grbm) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + + if (rd->id.use_srbm) { + amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + } + + if (rd->id.pg_lock) + mutex_unlock(&adev->pm.mutex); + + mutex_unlock(&rd->lock); + + pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + + amdgpu_virt_disable_access_debugfs(adev); + return result; +} + +static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data) +{ + struct amdgpu_debugfs_regs2_data *rd = f->private_data; + int r; + + switch (cmd) { + case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE: + mutex_lock(&rd->lock); + r = 
copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id); + mutex_unlock(&rd->lock); + return r ? -EINVAL : 0; + default: + return -EINVAL; + } + return 0; +} + +static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0); +} + +static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) +{ + return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1); +} + /** * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register @@ -1091,6 +1152,16 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, return result; } +static const struct file_operations amdgpu_debugfs_regs2_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl, + .read = amdgpu_debugfs_regs2_read, + .write = amdgpu_debugfs_regs2_write, + .open = amdgpu_debugfs_regs2_open, + .release = amdgpu_debugfs_regs2_release, + .llseek = default_llseek +}; + static const struct file_operations amdgpu_debugfs_regs_fops = { .owner = THIS_MODULE, .read = amdgpu_debugfs_regs_read, @@ -1148,6 +1219,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = { static const struct file_operations *debugfs_regs[] = { &amdgpu_debugfs_regs_fops, + &amdgpu_debugfs_regs2_fops, &amdgpu_debugfs_regs_didt_fops, &amdgpu_debugfs_regs_pcie_fops, &amdgpu_debugfs_regs_smc_fops, @@ -1160,6 +1232,7 @@ static const struct file_operations *debugfs_regs[] = { static const char *debugfs_regs_names[] = { "amdgpu_regs", + "amdgpu_regs2", "amdgpu_regs_didt", "amdgpu_regs_pcie", "amdgpu_regs_smc", @@ -1206,7 +1279,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) } /* Avoid accidentally unparking the sched thread during GPU reset */ - r = down_read_killable(&adev->reset_sem); + r = down_write_killable(&adev->reset_sem); if (r) return r; @@ -1235,7 +1308,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) kthread_unpark(ring->sched.thread); } - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -1255,7 +1328,7 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val) return r; } - *val = amdgpu_bo_evict_vram(adev); + *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -1268,17 +1341,15 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) { struct amdgpu_device *adev = (struct amdgpu_device *)data; struct drm_device *dev = adev_to_drm(adev); - struct ttm_resource_manager *man; int r; r = pm_runtime_get_sync(dev->dev); if (r < 0) { - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + pm_runtime_put_autosuspend(dev->dev); return r; } - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); - *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); @@ -1544,20 +1615,21 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) struct dentry *ent; int r, i; - + if (!debugfs_initialized()) + return 0; ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, &fops_ib_preempt); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n"); - return -EIO; + return PTR_ERR(ent); } ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
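/* [Editor's aside, not part of the patch] amdgpu_regs2 extends the old
 * amdgpu_regs file with an ioctl that selects GRBM/SRBM banks before a read
 * or write. A hedged userspace sketch (error handling omitted; the debugfs
 * mount path is the usual convention, and the iocdata layout follows the
 * fields used above, defined in amdgpu_umr.h):
 *
 *     int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);
 *     struct amdgpu_debugfs_regs2_iocdata id = {0};
 *     id.use_grbm = 1;
 *     id.grbm.se = id.grbm.sh = 0xFFFFFFFF;          // broadcast bank select
 *     ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id);
 *     uint32_t val;
 *     pread(fd, &val, sizeof(val), reg_byte_offset); // must be 4-byte aligned
 *
 * SET_STATE only records the selection under rd->lock; the bank-select
 * registers are actually programmed inside each read/write op.
 */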
&fops_sclk_set); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_set_sclk debugfs file\n"); - return -EIO; + return PTR_ERR(ent); } /* Register debugfs entries for amdgpu_ttm */ @@ -1584,13 +1656,10 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) if (!ring) continue; - if (amdgpu_debugfs_ring_init(adev, ring)) { - DRM_ERROR("Failed to register debugfs file for rings !\n"); - } + amdgpu_debugfs_ring_init(adev, ring); } amdgpu_ras_debugfs_create_all(adev); - amdgpu_debugfs_autodump_init(adev); amdgpu_rap_debugfs_init(adev); amdgpu_securedisplay_debugfs_init(adev); amdgpu_fw_attestation_debugfs_init(adev); @@ -1609,6 +1678,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) debugfs_create_blob("amdgpu_vbios", 0444, root, &adev->debugfs_vbios_blob); + adev->debugfs_discovery_blob.data = adev->mman.discovery_bin; + adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size; + debugfs_create_blob("amdgpu_discovery", 0444, root, + &adev->debugfs_discovery_blob); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h index 141a8474e24f..371a6f0deb29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -22,14 +22,9 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ - /* * Debugfs */ -struct amdgpu_autodump { - struct completion dumping; - struct wait_queue_head gpu_hang; -}; int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); int amdgpu_debugfs_init(struct amdgpu_device *adev); @@ -37,4 +32,3 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev); void amdgpu_debugfs_fence_init(struct amdgpu_device *adev); void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); void amdgpu_debugfs_gem_init(struct amdgpu_device *adev); -int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 41c6b3aacd37..90d22a376632 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -125,6 +125,7 @@ const char *amdgpu_asic_name[] = { "DIMGREY_CAVEFISH", "BEIGE_GOBY", "YELLOW_CARP", + "IP DISCOVERY", "LAST", }; @@ -305,7 +306,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, uint64_t last; int idx; - if (!drm_dev_enter(&adev->ddev, &idx)) + if (!drm_dev_enter(adev_to_drm(adev), &idx)) return; BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4)); @@ -2126,46 +2127,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (r) return r; break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - case CHIP_ALDEBARAN: - if (adev->flags & AMD_IS_APU) - adev->family = AMDGPU_FAMILY_RV; - else - adev->family = AMDGPU_FAMILY_AI; - - r = soc15_set_ip_blocks(adev); - if (r) - return r; - break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - if (adev->asic_type == CHIP_VANGOGH) - adev->family = AMDGPU_FAMILY_VGH; - else if (adev->asic_type == CHIP_YELLOW_CARP) - adev->family = AMDGPU_FAMILY_YC; - else - adev->family = AMDGPU_FAMILY_NV; - - r = nv_set_ip_blocks(adev); + default: + r = amdgpu_discovery_set_ip_blocks(adev); if (r) return r; break; - default: - /* FIXME: not supported yet */ - return -EINVAL; }
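/* [Editor's aside, not part of the patch] After this hunk every soc15/nv-era
 * chip funnels through amdgpu_discovery_set_ip_blocks() instead of the
 * per-family switches deleted above; IP blocks are then picked by discovered
 * version triples. IP_VERSION() packs major/minor/revision into one
 * comparable integer, roughly:
 *
 *     #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
 *
 *     if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
 *         ...; // e.g. a Sienna Cichlid graphics core
 *
 * so a single table lookup replaces chip-name checks scattered across the
 * driver.
 */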
amdgpu_amdkfd_device_probe(adev); @@ -2741,6 +2707,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } + if (amdgpu_sriov_vf(adev)) { + if (amdgpu_virt_release_full_gpu(adev, false)) + DRM_ERROR("failed to release exclusive mode on fini\n"); + } + return 0; } @@ -2801,10 +2772,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) amdgpu_ras_fini(adev); - if (amdgpu_sriov_vf(adev)) - if (amdgpu_virt_release_full_gpu(adev, false)) - DRM_ERROR("failed to release exclusive mode on fini\n"); - return 0; } @@ -3148,6 +3115,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + return r; + r = amdgpu_device_ip_resume_phase1(adev); if (r) return r; @@ -3196,11 +3167,21 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) { switch (asic_type) { #if defined(CONFIG_DRM_AMD_DC) -#if defined(CONFIG_DRM_AMD_DC_SI) case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + /* + * We have systems in the wild with these ASICs that require + * LVDS and VGA support which is not supported with DC. + * + * Fallback to the non-DC driver here by default so as not to + * cause regressions. + */ +#if defined(CONFIG_DRM_AMD_DC_SI) + return amdgpu_dc > 0; +#else + return false; #endif case CHIP_BONAIRE: case CHIP_KAVERI: @@ -3232,6 +3213,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_NAVI14: case CHIP_NAVI12: case CHIP_RENOIR: + case CHIP_CYAN_SKILLFISH: case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: @@ -3239,13 +3221,15 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VANGOGH: case CHIP_YELLOW_CARP: #endif + default: return amdgpu_dc != 0; -#endif +#else default: if (amdgpu_dc > 0) DRM_INFO_ONCE("Display Core has been requested via kernel parameter " "but isn't supported by ASIC, ignoring\n"); return false; +#endif } } @@ -3346,6 +3330,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) continue; } else if (timeout < 0) { timeout = MAX_SCHEDULE_TIMEOUT; + dev_warn(adev->dev, "lockup timeout disabled"); + add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); } else { timeout = msecs_to_jiffies(timeout); } @@ -3523,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->rmmio_size = pci_resource_len(adev->pdev, 2); } + for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) + atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); + adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (adev->rmmio == NULL) { return -ENOMEM; @@ -3530,17 +3519,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - /* enable PCIE atomic ops */ - r = pci_enable_atomic_ops_to_root(adev->pdev, - PCI_EXP_DEVCAP2_ATOMIC_COMP32 | - PCI_EXP_DEVCAP2_ATOMIC_COMP64); - if (r) { - adev->have_atomics_support = false; - DRM_INFO("PCIE atomic ops is not supported\n"); - } else { - adev->have_atomics_support = true; - } - amdgpu_device_get_pcie_info(adev); if (amdgpu_mcbp) @@ -3563,6 +3541,19 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + /* enable PCIE atomic ops */ + if (amdgpu_sriov_vf(adev)) + adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) + adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags == + (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | 
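/* [Editor's aside, not part of the patch] Bare-metal atomics probing now
 * reduces to a boolean conversion of the PCI core helper:
 *
 *     adev->have_atomics_support =
 *         !pci_enable_atomic_ops_to_root(pdev,
 *                 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
 *                 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
 *     // the helper returns 0 on success, so "!" yields a capability flag
 *
 * Under SR-IOV the VF cannot reach the root port, so the flags mirrored into
 * the pf2vf mailbox are consulted instead, as the hunk above shows.
 */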
PCI_EXP_DEVCAP2_ATOMIC_COMP64); + else + adev->have_atomics_support = + !pci_enable_atomic_ops_to_root(adev->pdev, + PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64); + if (!adev->have_atomics_support) + dev_info(adev->dev, "PCIE atomic ops is not supported\n"); + /* doorbell bar mapping and doorbell index init*/ amdgpu_device_doorbell_init(adev); @@ -3696,8 +3687,6 @@ fence_driver_init: /* Get a log2 for easy divisions. */ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); - amdgpu_fbdev_init(adev); - r = amdgpu_pm_sysfs_init(adev); if (r) { adev->pm_sysfs_en = false; @@ -3842,7 +3831,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ - if (!amdgpu_device_has_dc_support(adev)) + if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) drm_helper_force_disable_all(adev_to_drm(adev)); else drm_atomic_helper_shutdown(adev_to_drm(adev)); @@ -3855,11 +3844,11 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_ucode_sysfs_fini(adev); sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); - amdgpu_fbdev_fini(adev); + amdgpu_device_ip_fini_early(adev); amdgpu_irq_fini_hw(adev); - amdgpu_device_ip_fini_early(adev); + ttm_device_clear_dma_mappings(&adev->mman.bdev); amdgpu_gart_dummy_page_fini(adev); @@ -3868,8 +3857,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) void amdgpu_device_fini_sw(struct amdgpu_device *adev) { - amdgpu_device_ip_fini(adev); amdgpu_fence_driver_sw_fini(adev); + amdgpu_device_ip_fini(adev); release_firmware(adev->firmware.gpu_info_fw); adev->firmware.gpu_info_fw = NULL; adev->accel_working = false; @@ -3901,6 +3890,25 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) } +/** + * amdgpu_device_evict_resources - evict device resources + * @adev: amdgpu device object + * + * Evicts all ttm device resources(vram BOs, gart table) from the lru list + * of the vram memory type. Mainly used for evicting device resources + * at suspend time. + * + */ +static void amdgpu_device_evict_resources(struct amdgpu_device *adev) +{ + /* No need to evict vram on APUs for suspend to ram */ + if (adev->in_s3 && (adev->flags & AMD_IS_APU)) + return; + + if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) + DRM_WARN("evicting device resources failed\n"); + +} /* * Suspend & resume. @@ -3930,7 +3938,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_kms_helper_poll_disable(dev); if (fbcon) - amdgpu_fbdev_set_suspend(adev, 1); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -3941,17 +3949,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (!adev->in_s0ix) amdgpu_amdkfd_suspend(adev, adev->in_runpm); - /* evict vram memory */ - amdgpu_bo_evict_vram(adev); + /* First evict vram memory */ + amdgpu_device_evict_resources(adev); amdgpu_fence_driver_hw_fini(adev); amdgpu_device_ip_suspend_phase2(adev); - /* evict remaining vram memory - * This second call to evict vram is to evict the gart page table - * using the CPU. + /* This second call to evict device resources is to evict + * the gart page table using the CPU. 
*/ - amdgpu_bo_evict_vram(adev); + amdgpu_device_evict_resources(adev); return 0; } @@ -4008,7 +4015,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) flush_delayed_work(&adev->delayed_init_work); if (fbcon) - amdgpu_fbdev_set_suspend(adev, 0); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); drm_kms_helper_poll_enable(dev); @@ -4278,6 +4285,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, { int r; + amdgpu_amdkfd_pre_reset(adev); + if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else @@ -4285,8 +4294,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, if (r) return r; - amdgpu_amdkfd_pre_reset(adev); - /* Resume IP prior to SMC */ r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) @@ -4307,7 +4314,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); - amdgpu_amdkfd_post_reset(adev); error: if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { @@ -4458,10 +4464,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (reset_context->reset_req_dev == adev) job = reset_context->job; - /* no need to dump if device is not in good state during probe period */ - if (!adev->gmc.xgmi.pending_reset) - amdgpu_debugfs_wait_dump(adev); - if (amdgpu_sriov_vf(adev)) { /* stop the data exchange thread */ amdgpu_virt_fini_data_exchange(adev); @@ -4601,6 +4603,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, dev_warn(tmp_adev->dev, "asic atom init failed!"); } else { dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_amdkfd_resume_iommu(tmp_adev); + if (r) + goto out; + r = amdgpu_device_ip_resume_phase1(tmp_adev); if (r) goto out; @@ -4640,7 +4646,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) goto out; - amdgpu_fbdev_set_suspend(tmp_adev, 0); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); /* * The GPU enters bad state once faulty pages @@ -4848,6 +4854,9 @@ static void amdgpu_device_recheck_guilty_jobs( /* clear job's guilty and depend on the following step to decide the real one */ drm_sched_reset_karma(s_job); + /* for the real bad job, it will be resubmitted twice, adding a dma_fence_get + * to make sure fence is balanced */ + dma_fence_get(s_job->s_fence->parent); drm_sched_resubmit_jobs_ext(&ring->sched, 1); ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout); @@ -4883,6 +4892,7 @@ retry: /* got the hw fence, signal finished fence */ atomic_dec(ring->sched.score); + dma_fence_put(s_job->s_fence->parent); dma_fence_get(&s_job->s_fence->finished); dma_fence_signal(&s_job->s_fence->finished); dma_fence_put(&s_job->s_fence->finished); @@ -5027,7 +5037,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - amdgpu_fbdev_set_suspend(tmp_adev, 1); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); /* disable ras on ALL IPs */ if (!need_emergency_restart && @@ -5077,7 +5087,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); /* Actual ASIC resets if needed.*/ - /* TODO Implement XGMI hive reset logic for SRIOV */ + /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { r = amdgpu_device_reset_sriov(adev, job ?
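/* [Editor's aside, not part of the patch] The guilty-job recheck resubmits a
 * suspect job, so the hunk above pins the hardware fence across the second
 * submission and releases it once the wait is done, keeping the refcount
 * balanced:
 *
 *     dma_fence_get(s_job->s_fence->parent);    // extra ref across resubmit
 *     drm_sched_resubmit_jobs_ext(&ring->sched, 1);
 *     dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
 *     ...
 *     dma_fence_put(s_job->s_fence->parent);    // balance the extra get
 *
 * Without the get, the resubmission path could drop the last reference while
 * this function is still waiting on the fence.
 */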
false : true); if (r) @@ -5118,7 +5128,7 @@ skip_hw_reset: drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); } - if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { + if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); } @@ -5139,7 +5149,7 @@ skip_sched_resume: list_for_each_entry(tmp_adev, device_list_handle, reset_list) { /* unlock kfd: SRIOV would do it separately */ if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_post_reset(tmp_adev); + amdgpu_amdkfd_post_reset(tmp_adev); /* kfd_post_reset will do nothing if kfd device is not initialized, * need to bring up kfd here if it's not be initialized before @@ -5387,6 +5397,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta return PCI_ERS_RESULT_DISCONNECT; } + adev->pci_channel_state = state; + switch (state) { case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; @@ -5529,6 +5541,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev) DRM_INFO("PCI error: resume callback!!\n"); + /* Only continue execution for the case of pci_channel_io_frozen */ + if (adev->pci_channel_state != pci_channel_io_frozen) + return; + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h index 52488bb45112..6b25837955c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h @@ -52,6 +52,7 @@ struct amdgpu_df_funcs { uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, uint32_t ficadl_val, uint32_t ficadh_val); + bool (*query_ras_poison_mode)(struct amdgpu_device *adev); }; struct amdgpu_df { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index ada7bc19118a..ea00090b3fb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -21,16 +21,58 @@ * */ +#include <linux/firmware.h> + #include "amdgpu.h" #include "amdgpu_discovery.h" #include "soc15_hw_ip.h" #include "discovery.h" +#include "soc15.h" +#include "gfx_v9_0.h" +#include "gmc_v9_0.h" +#include "df_v1_7.h" +#include "df_v3_6.h" +#include "nbio_v6_1.h" +#include "nbio_v7_0.h" +#include "nbio_v7_4.h" +#include "hdp_v4_0.h" +#include "vega10_ih.h" +#include "vega20_ih.h" +#include "sdma_v4_0.h" +#include "uvd_v7_0.h" +#include "vce_v4_0.h" +#include "vcn_v1_0.h" +#include "vcn_v2_5.h" +#include "jpeg_v2_5.h" +#include "smuio_v9_0.h" +#include "gmc_v10_0.h" +#include "gfxhub_v2_0.h" +#include "mmhub_v2_0.h" +#include "nbio_v2_3.h" +#include "nbio_v7_2.h" +#include "hdp_v5_0.h" +#include "nv.h" +#include "navi10_ih.h" +#include "gfx_v10_0.h" +#include "sdma_v5_0.h" +#include "sdma_v5_2.h" +#include "vcn_v2_0.h" +#include "jpeg_v2_0.h" +#include "vcn_v3_0.h" +#include "jpeg_v3_0.h" +#include "amdgpu_vkms.h" +#include "mes_v10_1.h" +#include "smuio_v11_0.h" +#include "smuio_v11_0_6.h" +#include "smuio_v13_0.h" + +MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); + #define mmRCC_CONFIG_MEMSIZE 0xde3 #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 -#define HW_ID_MAX 300 static const char *hw_id_names[HW_ID_MAX] = { [MP1_HWID] = "MP1", @@ -66,6 +108,8 @@ static const char *hw_id_names[HW_ID_MAX] = { [HDP_HWID] = "HDP", [SDMA0_HWID] = "SDMA0", [SDMA1_HWID] = "SDMA1", + [SDMA2_HWID] = 
"SDMA2", + [SDMA3_HWID] = "SDMA3", [ISP_HWID] = "ISP", [DBGU_IO_HWID] = "DBGU_IO", [DF_HWID] = "DF", @@ -113,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = { [HDP_HWIP] = HDP_HWID, [SDMA0_HWIP] = SDMA0_HWID, [SDMA1_HWIP] = SDMA1_HWID, + [SDMA2_HWIP] = SDMA2_HWID, + [SDMA3_HWIP] = SDMA3_HWID, [MMHUB_HWIP] = MMHUB_HWID, [ATHUB_HWIP] = ATHUB_HWID, [NBIO_HWIP] = NBIF_HWID, @@ -129,6 +175,8 @@ static int hw_id_map[MAX_HWIP] = { [THM_HWIP] = THM_HWID, [CLK_HWIP] = CLKA_HWID, [UMC_HWIP] = UMC_HWID, + [XGMI_HWIP] = XGMI_HWID, + [DCI_HWIP] = DCI_HWID, }; static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) @@ -164,6 +212,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct gpu_info_header *ghdr; + const struct firmware *fw; uint16_t offset; uint16_t size; uint16_t checksum; @@ -174,10 +223,21 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (!adev->mman.discovery_bin) return -ENOMEM; - r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin); - if (r) { - DRM_ERROR("failed to read ip discovery binary\n"); - goto out; + if (amdgpu_discovery == 2) { + r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev); + if (r) + goto get_from_vram; + dev_info(adev->dev, "Using IP discovery from file\n"); + memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data, + adev->mman.discovery_tmr_size); + release_firmware(fw); + } else { +get_from_vram: + r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin); + if (r) { + DRM_ERROR("failed to read ip discovery binary\n"); + goto out; + } } bhdr = (struct binary_header *)adev->mman.discovery_bin; @@ -190,8 +250,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); - size = bhdr->binary_size - offset; - checksum = bhdr->binary_checksum; + size = le16_to_cpu(bhdr->binary_size) - offset; + checksum = le16_to_cpu(bhdr->binary_checksum); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { @@ -212,7 +272,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) } if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ihdr->size, checksum)) { + le16_to_cpu(ihdr->size), checksum)) { DRM_ERROR("invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; @@ -224,7 +284,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ghdr->size, checksum)) { + le32_to_cpu(ghdr->size), checksum)) { DRM_ERROR("invalid gc data table checksum\n"); r = -EINVAL; goto out; @@ -245,6 +305,22 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev) adev->mman.discovery_bin = NULL; } +static int amdgpu_discovery_validate_ip(const struct ip *ip) +{ + if (ip->number_instance >= HWIP_MAX_INSTANCE) { + DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n", + ip->number_instance); + return -EINVAL; + } + if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) { + DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n", + le16_to_cpu(ip->hw_id)); + return -EINVAL; + } + + return 0; +} + int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) { struct binary_header *bhdr; @@ -290,6 +366,10 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) for (j = 0; j < num_ips; j++) { ip = (struct ip 
*)(adev->mman.discovery_bin + ip_offset); + + if (amdgpu_discovery_validate_ip(ip)) + goto next_ip; + num_base_address = ip->num_base_address; DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", @@ -301,6 +381,11 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) if (le16_to_cpu(ip->hw_id) == VCN_HWID) adev->vcn.num_vcn_inst++; + if (le16_to_cpu(ip->hw_id) == SDMA0_HWID || + le16_to_cpu(ip->hw_id) == SDMA1_HWID || + le16_to_cpu(ip->hw_id) == SDMA2_HWID || + le16_to_cpu(ip->hw_id) == SDMA3_HWID) + adev->sdma.num_instances++; for (k = 0; k < num_base_address; k++) { /* @@ -317,10 +402,21 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) hw_id_names[le16_to_cpu(ip->hw_id)]); adev->reg_offset[hw_ip][ip->number_instance] = ip->base_address; + /* Instance support is somewhat inconsistent. + * SDMA is a good example. Sienna cichlid has 4 total + * SDMA instances, each enumerated separately (HWIDs + * 42, 43, 68, 69). Arcturus has 8 total SDMA instances, + * but they are enumerated as multiple instances of the + * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another + * example. On most chips there are multiple instances + * with the same HWID. + */ + adev->ip_versions[hw_ip][ip->number_instance] = + IP_VERSION(ip->major, ip->minor, ip->revision); } - } +next_ip: ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1); } } @@ -395,12 +491,16 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset)); for (i = 0; i < 32; i++) { - if (le32_to_cpu(harvest_info->list[i].hw_id) == 0) + if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) break; - switch (le32_to_cpu(harvest_info->list[i].hw_id)) { + switch (le16_to_cpu(harvest_info->list[i].hw_id)) { case VCN_HWID: vcn_harvest_count++; + if (harvest_info->list[i].number_instance == 0) + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0; + else + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; break; case DMU_HWID: adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; @@ -409,10 +509,21 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) break; } } + /* some IP discovery tables on Navy Flounder don't have this set correctly */ + if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; if (vcn_harvest_count == adev->vcn.num_vcn_inst) { adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; } + if ((adev->pdev->device == 0x731E && + (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) || + (adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) || + (adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) { + adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; + adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; + } } int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) @@ -450,3 +561,792 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) return 0; } + +static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) +{ + /* what IP to use for this? 
*/ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + amdgpu_device_ip_block_add(adev, &nv_common_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add common ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) +{ + /* use GC or MMHUB IP version */ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add gmc ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[OSSSYS_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 0, 1): + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 3, 0): + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + break; + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 1): + case IP_VERSION(4, 4, 0): + amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); + break; + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 1): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 3): + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 1): + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", + adev->ip_versions[OSSSYS_HWIP][0]); + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(9, 0, 0): + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); + break; + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); + break; + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(11, 5, 0): + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + break; + case IP_VERSION(11, 0, 8): + amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); + break; + case IP_VERSION(11, 0, 3): + case IP_VERSION(12, 0, 1): + 
amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); + break; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 3): + amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add psp ip block(MP0_HWIP:0x%x)\n", + adev->ip_versions[MP0_HWIP][0]); + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + case IP_VERSION(11, 0, 2): + if (adev->asic_type == CHIP_ARCTURUS) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + else + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + break; + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 8): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(11, 5, 0): + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + break; + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): + amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); + break; + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 2): + case IP_VERSION(13, 0, 3): + amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add smu ip block(MP1_HWIP:0x%x)\n", + adev->ip_versions[MP1_HWIP][0]); + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) +{ + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) { + amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); +#if defined(CONFIG_DRM_AMD_DC) + } else if (adev->ip_versions[DCE_HWIP][0]) { + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 3): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + amdgpu_device_ip_block_add(adev, &dm_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add dm ip block(DCE_HWIP:0x%x)\n", + adev->ip_versions[DCE_HWIP][0]); + return -EINVAL; + } + } else if (adev->ip_versions[DCI_HWIP][0]) { + switch (adev->ip_versions[DCI_HWIP][0]) { + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): + case IP_VERSION(12, 1, 0): + amdgpu_device_ip_block_add(adev, &dm_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add dm ip block(DCI_HWIP:0x%x)\n", + adev->ip_versions[DCI_HWIP][0]); + return -EINVAL; + } +#endif + } + return 0; +} + +static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): + amdgpu_device_ip_block_add(adev, 
&gfx_v10_0_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add gfx ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 0, 1): + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 2): + case IP_VERSION(4, 4, 0): + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + break; + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 1): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 5): + amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); + break; + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 2): + case IP_VERSION(5, 2, 4): + case IP_VERSION(5, 2, 5): + case IP_VERSION(5, 2, 3): + case IP_VERSION(5, 2, 1): + amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", + adev->ip_versions[SDMA0_HWIP][0]); + return -EINVAL; + } + return 0; +} + +static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) +{ + if (adev->ip_versions[VCE_HWIP][0]) { + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 2, 0): + /* UVD is not supported on vega20 SR-IOV */ + if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) + amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); + return -EINVAL; + } + switch (adev->ip_versions[VCE_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 1, 0): + /* VCE is not supported on vega20 SR-IOV */ + if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) + amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", + adev->ip_versions[VCE_HWIP][0]); + return -EINVAL; + } + } else { + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); + break; + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 2, 0): + amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); + break; + case IP_VERSION(2, 0, 3): + break; + case IP_VERSION(2, 5, 0): + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); + break; + case IP_VERSION(2, 6, 0): + amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); + break; + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 16): + case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 1, 1): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 192): + amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); + break; + case IP_VERSION(3, 0, 33): + amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); + break; + default: + dev_err(adev->dev, + "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); + return -EINVAL; + } + } + return 0; +} + +static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev) +{ + switch 
(adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); + break; + default: + break; + } + return 0; +} + +int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) +{ + int r; + + switch (adev->asic_type) { + case CHIP_VEGA10: + vega10_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); + break; + case CHIP_VEGA12: + vega10_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0); + adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); + break; + case CHIP_RAVEN: + vega10_reg_base_init(adev); + adev->sdma.num_instances = 1; + adev->vcn.num_vcn_inst = 1; + if (adev->apu_flags & AMD_APU_IS_RAVEN2) { + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2); +
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1); + adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1); + } else { + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0); + adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0); + } + break; + case CHIP_VEGA20: + vega20_reg_base_init(adev); + adev->sdma.num_instances = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0); + adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0); + adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0); + adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); + break; + case CHIP_ARCTURUS: + arct_reg_base_init(adev); + adev->sdma.num_instances = 8; + adev->vcn.num_vcn_inst = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2); + adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0); + adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); + break; + case CHIP_ALDEBARAN: + aldebaran_reg_base_init(adev); + 
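
One detail of the Arcturus block above: its eight SDMA instances get one version row each, instance 0 under SDMA0_HWIP and instances 1-7 under SDMA1_HWIP. A compilable mock of that table shape, with enum values and array bounds chosen for illustration rather than taken from the kernel:

#include <stdint.h>

enum { SDMA0_HWIP, SDMA1_HWIP, HWIP_MAX };
#define HWIP_MAX_INSTANCE 8
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

/* One row per HWIP, one column per instance, as the diff's table uses. */
static uint32_t ip_versions[HWIP_MAX][HWIP_MAX_INSTANCE];

/* Loop equivalent of the eight explicit Arcturus SDMA assignments. */
static void arcturus_fill_sdma_versions(void)
{
        int i;

        ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
        for (i = 0; i < 7; i++)
                ip_versions[SDMA1_HWIP][i] = IP_VERSION(4, 2, 2);
}
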
adev->sdma.num_instances = 5; + adev->vcn.num_vcn_inst = 2; + adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); + adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); + adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); + adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0); + adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0); + adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2); + adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4); + adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0); + adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2); + adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2); + adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0); + adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0); + adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); + break; + default: + r = amdgpu_discovery_reg_base_init(adev); + if (r) + return -EINVAL; + + amdgpu_discovery_harvest_ip(adev); + + if (!adev->mman.discovery_bin) { + DRM_ERROR("ip discovery uninitialized\n"); + return -EINVAL; + } + break; + } + + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + adev->family = AMDGPU_FAMILY_AI; + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + adev->family = AMDGPU_FAMILY_RV; + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + adev->family = AMDGPU_FAMILY_NV; + break; + case IP_VERSION(10, 3, 1): + adev->family = AMDGPU_FAMILY_VGH; + break; + case IP_VERSION(10, 3, 3): + adev->family = AMDGPU_FAMILY_YC; + break; + default: + return -EINVAL; + } + + if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) + adev->gmc.xgmi.supported = true; + + /* set NBIO version */ + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(6, 1, 0): + case IP_VERSION(6, 2, 0): + adev->nbio.funcs = &nbio_v6_1_funcs; + adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; + break; + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 0, 1): + case IP_VERSION(2, 5, 0): + adev->nbio.funcs = &nbio_v7_0_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; + break; + case IP_VERSION(7, 4, 0): + case IP_VERSION(7, 4, 1): + adev->nbio.funcs = &nbio_v7_4_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; + break; + case IP_VERSION(7, 4, 4): + adev->nbio.funcs = &nbio_v7_4_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald; + break; + case IP_VERSION(7, 2, 0): + case IP_VERSION(7, 2, 1): + case IP_VERSION(7, 5, 0): + adev->nbio.funcs = &nbio_v7_2_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; + break; + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 3, 1): + case IP_VERSION(2, 3, 2): + adev->nbio.funcs = &nbio_v2_3_funcs; + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; + break; + case IP_VERSION(3, 3, 0): + case IP_VERSION(3, 3, 1): + case IP_VERSION(3, 
3, 2): + case IP_VERSION(3, 3, 3): + adev->nbio.funcs = &nbio_v2_3_funcs; + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc; + break; + default: + break; + } + + switch (adev->ip_versions[HDP_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 0, 1): + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 1): + case IP_VERSION(4, 4, 0): + adev->hdp.funcs = &hdp_v4_0_funcs; + break; + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 1): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 3): + case IP_VERSION(5, 0, 4): + case IP_VERSION(5, 2, 0): + adev->hdp.funcs = &hdp_v5_0_funcs; + break; + default: + break; + } + + switch (adev->ip_versions[DF_HWIP][0]) { + case IP_VERSION(3, 6, 0): + case IP_VERSION(3, 6, 1): + case IP_VERSION(3, 6, 2): + adev->df.funcs = &df_v3_6_funcs; + break; + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 5, 0): + case IP_VERSION(3, 5, 1): + case IP_VERSION(3, 5, 2): + adev->df.funcs = &df_v1_7_funcs; + break; + default: + break; + } + + switch (adev->ip_versions[SMUIO_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(9, 0, 1): + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + case IP_VERSION(10, 0, 2): + adev->smuio.funcs = &smuio_v9_0_funcs; + break; + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): + case IP_VERSION(11, 0, 4): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 8): + adev->smuio.funcs = &smuio_v11_0_funcs; + break; + case IP_VERSION(11, 0, 6): + case IP_VERSION(11, 0, 10): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(13, 0, 1): + adev->smuio.funcs = &smuio_v11_0_6_funcs; + break; + case IP_VERSION(13, 0, 2): + adev->smuio.funcs = &smuio_v13_0_funcs; + break; + default: + break; + } + + r = amdgpu_discovery_set_common_ip_blocks(adev); + if (r) + return r; + + r = amdgpu_discovery_set_gmc_ip_blocks(adev); + if (r) + return r; + + /* For SR-IOV, PSP needs to be initialized before IH */ + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_discovery_set_psp_ip_blocks(adev); + if (r) + return r; + r = amdgpu_discovery_set_ih_ip_blocks(adev); + if (r) + return r; + } else { + r = amdgpu_discovery_set_ih_ip_blocks(adev); + if (r) + return r; + + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { + r = amdgpu_discovery_set_psp_ip_blocks(adev); + if (r) + return r; + } + } + + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { + r = amdgpu_discovery_set_smu_ip_blocks(adev); + if (r) + return r; + } + + r = amdgpu_discovery_set_display_ip_blocks(adev); + if (r) + return r; + + r = amdgpu_discovery_set_gc_ip_blocks(adev); + if (r) + return r; + + r = amdgpu_discovery_set_sdma_ip_blocks(adev); + if (r) + return r; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && + !amdgpu_sriov_vf(adev)) { + r = amdgpu_discovery_set_smu_ip_blocks(adev); + if (r) + return r; + } + + r = amdgpu_discovery_set_mm_ip_blocks(adev); + if (r) + return r; + + if (adev->enable_mes) { + r = amdgpu_discovery_set_mes_ip_blocks(adev); + if (r) + return r; + } + + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 48e6b88cfdfe..0ea029e3b850 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -36,5 +36,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n int 
amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, int *major, int *minor, int *revision); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); +int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 7a7316731911..18cc7155e667 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -83,9 +83,6 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work) unsigned i; int vpos, hpos; - if (amdgpu_display_flip_handle_fence(work, &work->excl)) - return; - for (i = 0; i < work->shared_count; ++i) if (amdgpu_display_flip_handle_fence(work, &work->shared[i])) return; @@ -203,7 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl, + r = dma_resv_get_fences(new_abo->tbo.base.resv, NULL, &work->shared_count, &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed to get fences for buffer\n"); @@ -253,7 +250,6 @@ unreserve: cleanup: amdgpu_bo_unref(&work->old_abo); - dma_fence_put(work->excl); for (i = 0; i < work->shared_count; ++i) dma_fence_put(work->shared[i]); kfree(work->shared); @@ -837,6 +833,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb) return 0; } +/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */ +static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb) +{ + u64 micro_tile_mode; + + /* Zero swizzle mode means linear */ + if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0) + return 0; + + micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE); + switch (micro_tile_mode) { + case 0: /* DISPLAY */ + case 3: /* RENDER */ + return 0; + default: + drm_dbg_kms(afb->base.dev, + "Micro tile mode %llu not supported for scanout\n", + micro_tile_mode); + return -EINVAL; + } +} + static void get_block_dimensions(unsigned int block_log2, unsigned int cpp, unsigned int *width, unsigned int *height) { @@ -1103,6 +1121,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { + struct amdgpu_device *adev = drm_to_adev(dev); int ret, i; /* @@ -1122,6 +1141,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, if (ret) return ret; + if (!dev->mode_config.allow_fb_modifiers) { + drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, + "GFX9+ requires FB check based on format modifier\n"); + ret = check_tiling_flags_gfx6(rfb); + if (ret) + return ret; + } + if (dev->mode_config.allow_fb_modifiers && !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) { ret = convert_tiling_flags_to_modifier(rfb); @@ -1572,13 +1599,10 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) continue; } robj = gem_to_amdgpu_bo(fb->obj[0]); - /* don't unpin kernel fb objects */ - if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { - r = amdgpu_bo_reserve(robj, true); - if (r == 0) { - amdgpu_bo_unpin(robj); - amdgpu_bo_unreserve(robj); - } + r = amdgpu_bo_reserve(robj, true); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ae6ab93c868b..4896c876ffec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -61,9 
+61,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0) attach->peer2peer = false; - if (attach->dev->driver == adev->dev->driver) - return 0; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f18240f87387..3a6f125c6dc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -31,13 +31,13 @@ #include "amdgpu_drv.h" #include <drm/drm_pciids.h> -#include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> #include <drm/drm_probe_helper.h> #include <linux/mmu_notifier.h> #include <linux/suspend.h> +#include <linux/cc_platform.h> #include "amdgpu.h" #include "amdgpu_irq.h" @@ -96,9 +96,11 @@ * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ * - 3.41.0 - Add video codec query * - 3.42.0 - Add 16bpc fixed point display support + * - 3.43.0 - Add device hot plug/unplug support + * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 42 +#define KMS_DRIVER_MINOR 44 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit; @@ -627,7 +629,7 @@ module_param_named(mcbp, amdgpu_mcbp, int, 0444); /** * DOC: discovery (int) * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM. - * (-1 = auto (default), 0 = disabled, 1 = enabled) + * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file) */ MODULE_PARM_DESC(discovery, "Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM"); @@ -875,7 +877,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444); * result in the GPU entering bad status when the number of total * faulty pages by ECC exceeds the threshold value. */ -MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)"); +MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)"); module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444); MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)"); @@ -890,6 +892,636 @@ MODULE_PARM_DESC(smu_pptable_id, "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)"); module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); +/* These devices are not supported by amdgpu. 
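
Before the unsupported-ID list continues, a note on the DRM interface bump earlier in this hunk (3.42 to 3.44, adding hot plug/unplug and the DCN3 DCC independent-block settings): userspace normally gates on the reported interface version before relying on such features. A minimal libdrm-style check; the function name and the feature-to-version mapping here are illustrative assumptions:

#include <xf86drm.h>

/* Nonzero if the opened amdgpu device reports KMS interface >= 3.44. */
static int kms_at_least_3_44(int drm_fd)
{
        drmVersionPtr v = drmGetVersion(drm_fd);
        int ok;

        if (!v)
                return 0;
        ok = v->version_major > 3 ||
             (v->version_major == 3 && v->version_minor >= 44);
        drmFreeVersion(v);
        return ok;
}
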
+ * They are supported by the mach64, r128, radeon drivers + */ +static const u16 amdgpu_unsupported_pciidlist[] = { + /* mach64 */ + 0x4354, + 0x4358, + 0x4554, + 0x4742, + 0x4744, + 0x4749, + 0x474C, + 0x474D, + 0x474E, + 0x474F, + 0x4750, + 0x4751, + 0x4752, + 0x4753, + 0x4754, + 0x4755, + 0x4756, + 0x4757, + 0x4758, + 0x4759, + 0x475A, + 0x4C42, + 0x4C44, + 0x4C47, + 0x4C49, + 0x4C4D, + 0x4C4E, + 0x4C50, + 0x4C51, + 0x4C52, + 0x4C53, + 0x5654, + 0x5655, + 0x5656, + /* r128 */ + 0x4c45, + 0x4c46, + 0x4d46, + 0x4d4c, + 0x5041, + 0x5042, + 0x5043, + 0x5044, + 0x5045, + 0x5046, + 0x5047, + 0x5048, + 0x5049, + 0x504A, + 0x504B, + 0x504C, + 0x504D, + 0x504E, + 0x504F, + 0x5050, + 0x5051, + 0x5052, + 0x5053, + 0x5054, + 0x5055, + 0x5056, + 0x5057, + 0x5058, + 0x5245, + 0x5246, + 0x5247, + 0x524b, + 0x524c, + 0x534d, + 0x5446, + 0x544C, + 0x5452, + /* radeon */ + 0x3150, + 0x3151, + 0x3152, + 0x3154, + 0x3155, + 0x3E50, + 0x3E54, + 0x4136, + 0x4137, + 0x4144, + 0x4145, + 0x4146, + 0x4147, + 0x4148, + 0x4149, + 0x414A, + 0x414B, + 0x4150, + 0x4151, + 0x4152, + 0x4153, + 0x4154, + 0x4155, + 0x4156, + 0x4237, + 0x4242, + 0x4336, + 0x4337, + 0x4437, + 0x4966, + 0x4967, + 0x4A48, + 0x4A49, + 0x4A4A, + 0x4A4B, + 0x4A4C, + 0x4A4D, + 0x4A4E, + 0x4A4F, + 0x4A50, + 0x4A54, + 0x4B48, + 0x4B49, + 0x4B4A, + 0x4B4B, + 0x4B4C, + 0x4C57, + 0x4C58, + 0x4C59, + 0x4C5A, + 0x4C64, + 0x4C66, + 0x4C67, + 0x4E44, + 0x4E45, + 0x4E46, + 0x4E47, + 0x4E48, + 0x4E49, + 0x4E4A, + 0x4E4B, + 0x4E50, + 0x4E51, + 0x4E52, + 0x4E53, + 0x4E54, + 0x4E56, + 0x5144, + 0x5145, + 0x5146, + 0x5147, + 0x5148, + 0x514C, + 0x514D, + 0x5157, + 0x5158, + 0x5159, + 0x515A, + 0x515E, + 0x5460, + 0x5462, + 0x5464, + 0x5548, + 0x5549, + 0x554A, + 0x554B, + 0x554C, + 0x554D, + 0x554E, + 0x554F, + 0x5550, + 0x5551, + 0x5552, + 0x5554, + 0x564A, + 0x564B, + 0x564F, + 0x5652, + 0x5653, + 0x5657, + 0x5834, + 0x5835, + 0x5954, + 0x5955, + 0x5974, + 0x5975, + 0x5960, + 0x5961, + 0x5962, + 0x5964, + 0x5965, + 0x5969, + 0x5a41, + 0x5a42, + 0x5a61, + 0x5a62, + 0x5b60, + 0x5b62, + 0x5b63, + 0x5b64, + 0x5b65, + 0x5c61, + 0x5c63, + 0x5d48, + 0x5d49, + 0x5d4a, + 0x5d4c, + 0x5d4d, + 0x5d4e, + 0x5d4f, + 0x5d50, + 0x5d52, + 0x5d57, + 0x5e48, + 0x5e4a, + 0x5e4b, + 0x5e4c, + 0x5e4d, + 0x5e4f, + 0x6700, + 0x6701, + 0x6702, + 0x6703, + 0x6704, + 0x6705, + 0x6706, + 0x6707, + 0x6708, + 0x6709, + 0x6718, + 0x6719, + 0x671c, + 0x671d, + 0x671f, + 0x6720, + 0x6721, + 0x6722, + 0x6723, + 0x6724, + 0x6725, + 0x6726, + 0x6727, + 0x6728, + 0x6729, + 0x6738, + 0x6739, + 0x673e, + 0x6740, + 0x6741, + 0x6742, + 0x6743, + 0x6744, + 0x6745, + 0x6746, + 0x6747, + 0x6748, + 0x6749, + 0x674A, + 0x6750, + 0x6751, + 0x6758, + 0x6759, + 0x675B, + 0x675D, + 0x675F, + 0x6760, + 0x6761, + 0x6762, + 0x6763, + 0x6764, + 0x6765, + 0x6766, + 0x6767, + 0x6768, + 0x6770, + 0x6771, + 0x6772, + 0x6778, + 0x6779, + 0x677B, + 0x6840, + 0x6841, + 0x6842, + 0x6843, + 0x6849, + 0x684C, + 0x6850, + 0x6858, + 0x6859, + 0x6880, + 0x6888, + 0x6889, + 0x688A, + 0x688C, + 0x688D, + 0x6898, + 0x6899, + 0x689b, + 0x689c, + 0x689d, + 0x689e, + 0x68a0, + 0x68a1, + 0x68a8, + 0x68a9, + 0x68b0, + 0x68b8, + 0x68b9, + 0x68ba, + 0x68be, + 0x68bf, + 0x68c0, + 0x68c1, + 0x68c7, + 0x68c8, + 0x68c9, + 0x68d8, + 0x68d9, + 0x68da, + 0x68de, + 0x68e0, + 0x68e1, + 0x68e4, + 0x68e5, + 0x68e8, + 0x68e9, + 0x68f1, + 0x68f2, + 0x68f8, + 0x68f9, + 0x68fa, + 0x68fe, + 0x7100, + 0x7101, + 0x7102, + 0x7103, + 0x7104, + 0x7105, + 0x7106, + 0x7108, + 0x7109, + 0x710A, + 0x710B, + 0x710C, + 0x710E, + 0x710F, + 0x7140, + 0x7141, + 
0x7142, + 0x7143, + 0x7144, + 0x7145, + 0x7146, + 0x7147, + 0x7149, + 0x714A, + 0x714B, + 0x714C, + 0x714D, + 0x714E, + 0x714F, + 0x7151, + 0x7152, + 0x7153, + 0x715E, + 0x715F, + 0x7180, + 0x7181, + 0x7183, + 0x7186, + 0x7187, + 0x7188, + 0x718A, + 0x718B, + 0x718C, + 0x718D, + 0x718F, + 0x7193, + 0x7196, + 0x719B, + 0x719F, + 0x71C0, + 0x71C1, + 0x71C2, + 0x71C3, + 0x71C4, + 0x71C5, + 0x71C6, + 0x71C7, + 0x71CD, + 0x71CE, + 0x71D2, + 0x71D4, + 0x71D5, + 0x71D6, + 0x71DA, + 0x71DE, + 0x7200, + 0x7210, + 0x7211, + 0x7240, + 0x7243, + 0x7244, + 0x7245, + 0x7246, + 0x7247, + 0x7248, + 0x7249, + 0x724A, + 0x724B, + 0x724C, + 0x724D, + 0x724E, + 0x724F, + 0x7280, + 0x7281, + 0x7283, + 0x7284, + 0x7287, + 0x7288, + 0x7289, + 0x728B, + 0x728C, + 0x7290, + 0x7291, + 0x7293, + 0x7297, + 0x7834, + 0x7835, + 0x791e, + 0x791f, + 0x793f, + 0x7941, + 0x7942, + 0x796c, + 0x796d, + 0x796e, + 0x796f, + 0x9400, + 0x9401, + 0x9402, + 0x9403, + 0x9405, + 0x940A, + 0x940B, + 0x940F, + 0x94A0, + 0x94A1, + 0x94A3, + 0x94B1, + 0x94B3, + 0x94B4, + 0x94B5, + 0x94B9, + 0x9440, + 0x9441, + 0x9442, + 0x9443, + 0x9444, + 0x9446, + 0x944A, + 0x944B, + 0x944C, + 0x944E, + 0x9450, + 0x9452, + 0x9456, + 0x945A, + 0x945B, + 0x945E, + 0x9460, + 0x9462, + 0x946A, + 0x946B, + 0x947A, + 0x947B, + 0x9480, + 0x9487, + 0x9488, + 0x9489, + 0x948A, + 0x948F, + 0x9490, + 0x9491, + 0x9495, + 0x9498, + 0x949C, + 0x949E, + 0x949F, + 0x94C0, + 0x94C1, + 0x94C3, + 0x94C4, + 0x94C5, + 0x94C6, + 0x94C7, + 0x94C8, + 0x94C9, + 0x94CB, + 0x94CC, + 0x94CD, + 0x9500, + 0x9501, + 0x9504, + 0x9505, + 0x9506, + 0x9507, + 0x9508, + 0x9509, + 0x950F, + 0x9511, + 0x9515, + 0x9517, + 0x9519, + 0x9540, + 0x9541, + 0x9542, + 0x954E, + 0x954F, + 0x9552, + 0x9553, + 0x9555, + 0x9557, + 0x955f, + 0x9580, + 0x9581, + 0x9583, + 0x9586, + 0x9587, + 0x9588, + 0x9589, + 0x958A, + 0x958B, + 0x958C, + 0x958D, + 0x958E, + 0x958F, + 0x9590, + 0x9591, + 0x9593, + 0x9595, + 0x9596, + 0x9597, + 0x9598, + 0x9599, + 0x959B, + 0x95C0, + 0x95C2, + 0x95C4, + 0x95C5, + 0x95C6, + 0x95C7, + 0x95C9, + 0x95CC, + 0x95CD, + 0x95CE, + 0x95CF, + 0x9610, + 0x9611, + 0x9612, + 0x9613, + 0x9614, + 0x9615, + 0x9616, + 0x9640, + 0x9641, + 0x9642, + 0x9643, + 0x9644, + 0x9645, + 0x9647, + 0x9648, + 0x9649, + 0x964a, + 0x964b, + 0x964c, + 0x964e, + 0x964f, + 0x9710, + 0x9711, + 0x9712, + 0x9713, + 0x9714, + 0x9715, + 0x9802, + 0x9803, + 0x9804, + 0x9805, + 0x9806, + 0x9807, + 0x9808, + 0x9809, + 0x980A, + 0x9900, + 0x9901, + 0x9903, + 0x9904, + 0x9905, + 0x9906, + 0x9907, + 0x9908, + 0x9909, + 0x990A, + 0x990B, + 0x990C, + 0x990D, + 0x990E, + 0x990F, + 0x9910, + 0x9913, + 0x9917, + 0x9918, + 0x9919, + 0x9990, + 0x9991, + 0x9992, + 0x9993, + 0x9994, + 0x9995, + 0x9996, + 0x9997, + 0x9998, + 0x9999, + 0x999A, + 0x999B, + 0x999C, + 0x999D, + 0x99A0, + 0x99A2, + 0x99A4, +}; + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -1239,6 +1871,16 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, + { PCI_DEVICE(0x1002, PCI_ANY_ID), + .class = PCI_CLASS_DISPLAY_VGA << 8, + .class_mask = 0xffffff, + .driver_data = CHIP_IP_DISCOVERY }, + + { PCI_DEVICE(0x1002, PCI_ANY_ID), + .class = PCI_CLASS_DISPLAY_OTHER << 8, + .class_mask = 0xffffff, + .driver_data = CHIP_IP_DISCOVERY }, + {0, 0, 0} }; @@ -1252,9 +1894,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, struct drm_device 
*ddev; struct amdgpu_device *adev; unsigned long flags = ent->driver_data; - int ret, retry = 0; + int ret, retry = 0, i; bool supports_atomic = false; + /* skip devices which are owned by radeon */ + for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { + if (amdgpu_unsupported_pciidlist[i] == pdev->device) + return -ENODEV; + } + + if (flags == 0) { + DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n"); + return -ENODEV; + } + if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; @@ -1269,7 +1922,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, * however, SME requires an indirect IOMMU mapping because the encryption * bit is beyond the DMA mask of the chip. */ - if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) { + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) && + ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) { dev_info(&pdev->dev, "SME is not compatible with RAVEN\n"); return -ENOTSUPP; @@ -1347,6 +2001,19 @@ retry_init: goto err_pci; } + /* + * 1. don't init fbdev on hw without DCE + * 2. don't init fbdev if there are no connectors + */ + if (adev->mode_info.mode_config_initialized && + !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) { + /* select 8 bpp console on low vram cards */ + if (adev->gmc.real_vram_size <= (32*1024*1024)) + drm_fbdev_generic_setup(adev_to_drm(adev), 8); + else + drm_fbdev_generic_setup(adev_to_drm(adev), 32); + } + ret = amdgpu_debugfs_init(adev); if (ret) DRM_ERROR("Creating debugfs files failed (%d).\n", ret); @@ -1508,6 +2175,10 @@ static int amdgpu_pmops_resume(struct device *dev) struct amdgpu_device *adev = drm_to_adev(drm_dev); int r; + /* Avoids registers access if device is physically gone */ + if (!pci_device_is_present(adev->pdev)) + adev->no_hw_access = true; + r = amdgpu_device_resume(drm_dev, true); if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = false; @@ -1857,10 +2528,8 @@ static int __init amdgpu_init(void) { int r; - if (vgacon_text_force()) { - DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); + if (drm_firmware_drivers_only()) return -EINVAL; - } r = amdgpu_sync_init(); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c deleted file mode 100644 index cd0acbea75da..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Copyright © 2007 David Airlie - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
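
Stepping back to the amdgpu_init() change above: the old vgacon_text_force() test is replaced by the new drm_firmware_drivers_only() helper from the nomodeset rework merged in this cycle. The usual shape of that gate in a PCI DRM driver looks roughly like the sketch below; the example_* names are placeholders and the header placement is assumed, not amdgpu code:

#include <drm/drm_drv.h>
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
        .name = "example",
};

static int __init example_init(void)
{
        /* Respect "nomodeset": leave the device to firmware fb drivers. */
        if (drm_firmware_drivers_only())
                return -EINVAL;

        return pci_register_driver(&example_pci_driver);
}
module_init(example_init);
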
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * David Airlie - */ - -#include <linux/module.h> -#include <linux/pm_runtime.h> -#include <linux/slab.h> -#include <linux/vga_switcheroo.h> - -#include <drm/amdgpu_drm.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> - -#include "amdgpu.h" -#include "cikd.h" -#include "amdgpu_gem.h" - -#include "amdgpu_display.h" - -/* object hierarchy - - this contains a helper + a amdgpu fb - the helper contains a pointer to amdgpu framebuffer baseclass. -*/ - -static int -amdgpufb_open(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - int ret = pm_runtime_get_sync(fb_helper->dev->dev); - if (ret < 0 && ret != -EACCES) { - pm_runtime_mark_last_busy(fb_helper->dev->dev); - pm_runtime_put_autosuspend(fb_helper->dev->dev); - return ret; - } - return 0; -} - -static int -amdgpufb_release(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - - pm_runtime_mark_last_busy(fb_helper->dev->dev); - pm_runtime_put_autosuspend(fb_helper->dev->dev); - return 0; -} - -static const struct fb_ops amdgpufb_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_open = amdgpufb_open, - .fb_release = amdgpufb_release, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, -}; - - -int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled) -{ - int aligned = width; - int pitch_mask = 0; - - switch (cpp) { - case 1: - pitch_mask = 255; - break; - case 2: - pitch_mask = 127; - break; - case 3: - case 4: - pitch_mask = 63; - break; - } - - aligned += pitch_mask; - aligned &= ~pitch_mask; - return aligned * cpp; -} - -static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) -{ - struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - int ret; - - ret = amdgpu_bo_reserve(abo, true); - if (likely(ret == 0)) { - amdgpu_bo_kunmap(abo); - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - drm_gem_object_put(gobj); -} - -static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, - struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object **gobj_p) -{ - const struct drm_format_info *info; - struct amdgpu_device *adev = rfbdev->adev; - struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *abo = NULL; - bool fb_tiled = false; /* useful for testing */ - u32 tiling_flags = 0, domain; - int ret; - int aligned_size, size; - int height = mode_cmd->height; - u32 cpp; - u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED; - - info = drm_get_format_info(adev_to_drm(adev), mode_cmd); - cpp = info->cpp[0]; - - /* need to align pitch with crtc limits */ - mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, - fb_tiled); - domain = amdgpu_display_supported_domains(adev, flags); - height = ALIGN(mode_cmd->height, 8); - size = mode_cmd->pitches[0] * height; - aligned_size = ALIGN(size, PAGE_SIZE); - ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, - ttm_bo_type_device, NULL, &gobj); - if (ret) { - pr_err("failed to allocate framebuffer (%d)\n", aligned_size); - return -ENOMEM; 
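
As an aside on the allocation math in the removed amdgpufb_create_pinned_object() above (its pitch helper reappears later in this diff as amdgpu_gem_align_pitch()): for a 1366x768, 32 bpp surface the numbers work out as below. Standalone re-computation, assuming a 4 KiB page size:

#include <stdio.h>

int main(void)
{
        int width = 1366, height = 768, cpp = 4;
        int pitch_mask = 63;                        /* the cpp == 4 case */
        int pitch = ((width + pitch_mask) & ~pitch_mask) * cpp;
        int h = (height + 7) & ~7;                  /* ALIGN(height, 8) */
        long size = (long)pitch * h;
        long aligned = (size + 4095) & ~4095L;      /* ALIGN(size, PAGE_SIZE) */

        /* prints: pitch=5632 size=4325376 aligned=4325376 */
        printf("pitch=%d size=%ld aligned=%ld\n", pitch, size, aligned);
        return 0;
}
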
- } - abo = gem_to_amdgpu_bo(gobj); - - if (fb_tiled) - tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1); - - ret = amdgpu_bo_reserve(abo, false); - if (unlikely(ret != 0)) - goto out_unref; - - if (tiling_flags) { - ret = amdgpu_bo_set_tiling_flags(abo, - tiling_flags); - if (ret) - dev_err(adev->dev, "FB failed to set tiling flags\n"); - } - - ret = amdgpu_bo_pin(abo, domain); - if (ret) { - amdgpu_bo_unreserve(abo); - goto out_unref; - } - - ret = amdgpu_ttm_alloc_gart(&abo->tbo); - if (ret) { - amdgpu_bo_unreserve(abo); - dev_err(adev->dev, "%p bind failed\n", abo); - goto out_unref; - } - - ret = amdgpu_bo_kmap(abo, NULL); - amdgpu_bo_unreserve(abo); - if (ret) { - goto out_unref; - } - - *gobj_p = gobj; - return 0; -out_unref: - amdgpufb_destroy_pinned_object(gobj); - *gobj_p = NULL; - return ret; -} - -static int amdgpufb_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; - struct amdgpu_device *adev = rfbdev->adev; - struct fb_info *info; - struct drm_framebuffer *fb = NULL; - struct drm_mode_fb_cmd2 mode_cmd; - struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *abo = NULL; - int ret; - - memset(&mode_cmd, 0, sizeof(mode_cmd)); - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - - if (sizes->surface_bpp == 24) - sizes->surface_bpp = 32; - - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); - if (ret) { - DRM_ERROR("failed to create fbcon object %d\n", ret); - return ret; - } - - abo = gem_to_amdgpu_bo(gobj); - - /* okay we have an object now allocate the framebuffer */ - info = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto out; - } - - ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb, - &mode_cmd, gobj); - if (ret) { - DRM_ERROR("failed to initialize framebuffer %d\n", ret); - goto out; - } - - fb = &rfbdev->rfb.base; - - /* setup helper */ - rfbdev->helper.fb = fb; - - info->fbops = &amdgpufb_ops; - - info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo); - info->fix.smem_len = amdgpu_bo_size(abo); - info->screen_base = amdgpu_bo_kptr(abo); - info->screen_size = amdgpu_bo_size(abo); - - drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); - - /* setup aperture base/size for vesafb takeover */ - info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base; - info->apertures->ranges[0].size = adev->gmc.aper_size; - - /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - - if (info->screen_base == NULL) { - ret = -ENOSPC; - goto out; - } - - DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); - DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base); - DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); - DRM_INFO("fb depth is %d\n", fb->format->depth); - DRM_INFO(" pitch is %d\n", fb->pitches[0]); - - vga_switcheroo_client_fb_set(adev->pdev, info); - return 0; - -out: - if (fb && ret) { - drm_gem_object_put(gobj); - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); - kfree(fb); - } - return ret; -} - -static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) -{ - struct amdgpu_framebuffer *rfb = &rfbdev->rfb; - int i; - - drm_fb_helper_unregister_fbi(&rfbdev->helper); - - if (rfb->base.obj[0]) { - for (i = 0; i < rfb->base.format->num_planes; i++) - 
drm_gem_object_put(rfb->base.obj[0]); - amdgpufb_destroy_pinned_object(rfb->base.obj[0]); - rfb->base.obj[0] = NULL; - drm_framebuffer_unregister_private(&rfb->base); - drm_framebuffer_cleanup(&rfb->base); - } - drm_fb_helper_fini(&rfbdev->helper); - - return 0; -} - -static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = { - .fb_probe = amdgpufb_create, -}; - -int amdgpu_fbdev_init(struct amdgpu_device *adev) -{ - struct amdgpu_fbdev *rfbdev; - int bpp_sel = 32; - int ret; - - /* don't init fbdev on hw without DCE */ - if (!adev->mode_info.mode_config_initialized) - return 0; - - /* don't init fbdev if there are no connectors */ - if (list_empty(&adev_to_drm(adev)->mode_config.connector_list)) - return 0; - - /* select 8 bpp console on low vram cards */ - if (adev->gmc.real_vram_size <= (32*1024*1024)) - bpp_sel = 8; - - rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); - if (!rfbdev) - return -ENOMEM; - - rfbdev->adev = adev; - adev->mode_info.rfbdev = rfbdev; - - drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper, - &amdgpu_fb_helper_funcs); - - ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper); - if (ret) { - kfree(rfbdev); - return ret; - } - - /* disable all the possible outputs/crtcs before entering KMS mode */ - if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display) - drm_helper_disable_unused_functions(adev_to_drm(adev)); - - drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); - return 0; -} - -void amdgpu_fbdev_fini(struct amdgpu_device *adev) -{ - if (!adev->mode_info.rfbdev) - return; - - amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev); - kfree(adev->mode_info.rfbdev); - adev->mode_info.rfbdev = NULL; -} - -void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) -{ - if (adev->mode_info.rfbdev) - drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper, - state); -} - -int amdgpu_fbdev_total_size(struct amdgpu_device *adev) -{ - struct amdgpu_bo *robj; - int size = 0; - - if (!adev->mode_info.rfbdev) - return 0; - - robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]); - size += amdgpu_bo_size(robj); - return size; -} - -bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) -{ - if (!adev->mode_info.rfbdev) - return false; - if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0])) - return true; - return false; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index a25e192e8a3f..3b7e86ea7167 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -550,7 +550,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) drm_sched_stop(&ring->sched, NULL); /* You can't wait for HW to signal if it's gone */ - if (!drm_dev_is_unplugged(&adev->ddev)) + if (!drm_dev_is_unplugged(adev_to_drm(adev))) r = amdgpu_fence_wait_empty(ring); else r = -ENODEV; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 76efd5f8950f..d3e4203f6217 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -34,6 +34,7 @@ #include <asm/set_memory.h> #endif #include "amdgpu.h" +#include <drm/drm_drv.h> /* * GART @@ -230,12 +231,16 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, u64 page_base; /* Starting from VEGA10, system bit must be 0 to mean invalid. 
*/ uint64_t flags = 0; + int idx; if (!adev->gart.ready) { WARN(1, "trying to unbind memory from uninitialized GART !\n"); return -EINVAL; } + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return 0; + t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; for (i = 0; i < pages; i++, p++) { @@ -254,6 +259,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); + drm_dev_exit(idx); return 0; } @@ -276,12 +282,16 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, { uint64_t page_base; unsigned i, j, t; + int idx; if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; } + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return 0; + t = offset / AMDGPU_GPU_PAGE_SIZE; for (i = 0; i < pages; i++) { @@ -291,6 +301,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, page_base += AMDGPU_GPU_PAGE_SIZE; } } + drm_dev_exit(idx); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index a1e63ba4c54a..c0d8f40a5b45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -877,6 +877,32 @@ out: return r; } +static int amdgpu_gem_align_pitch(struct amdgpu_device *adev, + int width, + int cpp, + bool tiled) +{ + int aligned = width; + int pitch_mask = 0; + + switch (cpp) { + case 1: + pitch_mask = 255; + break; + case 2: + pitch_mask = 127; + break; + case 3: + case 4: + pitch_mask = 63; + break; + } + + aligned += pitch_mask; + aligned &= ~pitch_mask; + return aligned * cpp; +} + int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) @@ -885,7 +911,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_gem_object *gobj; uint32_t handle; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_CPU_GTT_USWC; + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; u32 domain; int r; @@ -897,8 +924,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, if (adev->mman.buffer_funcs_enabled) flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; - args->pitch = amdgpu_align_pitch(adev, args->width, - DIV_ROUND_UP(args->bpp, 8), 0); + args->pitch = amdgpu_gem_align_pitch(adev, args->width, + DIV_ROUND_UP(args->bpp, 8), 0); args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); domain = amdgpu_bo_get_preferred_domain(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index e7f06bd0f0cd..1916ec84dd71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -31,6 +31,8 @@ /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) +#define GFX_OFF_NO_DELAY 0 + /* * GPU GFX IP block helpers function. 
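
The GFX_OFF_NO_DELAY define above feeds the gfxoff hunk that follows: entry into GFXOFF stays debounced by 100 ms in normal operation but fires immediately on the way into s0ix, where waiting buys nothing. For orientation, the decision reduces to this fragment of the hunk below:

        unsigned long delay = GFX_OFF_DELAY_ENABLE; /* msecs_to_jiffies(100) */

        if (adev->in_s0ix)                          /* suspending: no need to wait */
                delay = GFX_OFF_NO_DELAY;           /* 0 jiffies */
        schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay);
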
*/ @@ -558,6 +560,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev) void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) { + unsigned long delay = GFX_OFF_DELAY_ENABLE; + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return; @@ -573,8 +577,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) adev->gfx.gfx_off_req_count--; - if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE); + if (adev->gfx.gfx_off_req_count == 0 && + !adev->gfx.gfx_off_state) { + /* If going to s2idle, no need to wait */ + if (adev->in_s0ix) + delay = GFX_OFF_NO_DELAY; + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + delay); + } } else { if (adev->gfx.gfx_off_req_count == 0) { cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index d43fe2ed8116..f851196c83a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -42,10 +42,9 @@ #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES -enum gfx_pipe_priority { - AMDGPU_GFX_PIPE_PRIO_NORMAL = 1, - AMDGPU_GFX_PIPE_PRIO_HIGH, - AMDGPU_GFX_PIPE_PRIO_MAX +enum amdgpu_gfx_pipe_priority { + AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1, + AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2 }; /* Argument for PPSMC_MSG_GpuChangeState */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index c7797eac83c3..08478fce00f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -153,10 +153,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, { void __iomem *ptr = (void *)cpu_pt_addr; uint64_t value; - int idx; - - if (!drm_dev_enter(&adev->ddev, &idx)) - return 0; /* * The following is for PTE only. GART does not have PDEs. 
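
The GART hunks above, together with the amdgpu_gmc.c hunks that follow, move the drm_dev_enter()/drm_dev_exit() hotplug guard out of the per-PTE helper and up to the functions that actually touch hardware. The pattern, as a fragment with error handling as in the diff:

        int idx;

        /* Enter a drm_dev critical section before touching HW; if the
         * device was unplugged, skip the access and report success. */
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return 0;

        /* ... write GART/PDB0 entries, flush TLBs ... */

        drm_dev_exit(idx);
        return 0;
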
@@ -165,8 +161,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, value |= flags; writeq(value, ptr + (gpu_page_idx * 8)); - drm_dev_exit(idx); - return 0; } @@ -598,7 +592,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) break; default: adev->gmc.tmz_enabled = false; - dev_warn(adev->dev, + dev_info(adev->dev, "Trusted Memory Zone (TMZ) feature not supported\n"); break; } @@ -749,6 +743,10 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; u64 vram_end = vram_addr + vram_size; u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); + int idx; + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return; flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE; flags |= AMDGPU_PTE_WRITEABLE; @@ -770,6 +768,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED; /* Requires gart_ptb_gpu_pa to be 4K aligned */ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); + drm_dev_exit(idx); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index c076a6b9a5a2..bc1297dcdf97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -300,20 +300,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, */ int amdgpu_ib_pool_init(struct amdgpu_device *adev) { - unsigned size; int r, i; if (adev->ib_pool_ready) return 0; for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { - if (i == AMDGPU_IB_POOL_DIRECT) - size = PAGE_SIZE * 6; - else - size = AMDGPU_IB_POOL_SIZE; - r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i], - size, AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_SIZE, + AMDGPU_GPU_PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index f3d62e196901..0c7963dfacad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, */ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - unsigned int count = AMDGPU_IH_MAX_NUM_IVS; + unsigned int count; u32 wptr; if (!ih->enabled || adev->shutdown) @@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) wptr = amdgpu_ih_get_wptr(adev, ih); restart_ih: + count = AMDGPU_IH_MAX_NUM_IVS; DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index cc2e0c9cfe0a..4f3c62adccbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -333,7 +333,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ - /* XXX: can this be enabled for DC? 
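
On the amdgpu_ih.c change a few hunks above: re-arming count at the restart_ih label matters because each pass may consume at most AMDGPU_IH_MAX_NUM_IVS ring entries, and with the old placement a restarted pass inherited the leftover budget. The loop structure, sketched from the surrounding code rather than quoted verbatim:

        /* Sketch of amdgpu_ih_process()'s core after the fix. */
restart_ih:
        count = AMDGPU_IH_MAX_NUM_IVS;          /* fresh budget per pass */
        while (ih->rptr != wptr && --count)
                amdgpu_irq_dispatch(adev, ih);

        wptr = amdgpu_ih_get_wptr(adev, ih);
        if (wptr != ih->rptr)
                goto restart_ih;                /* more IVs arrived meanwhile */
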
*/ adev_to_drm(adev)->vblank_disable_immediate = true; r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4e30a09bc887..bfc47bea23db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -38,7 +38,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) struct amdgpu_device *adev = ring->adev; int idx; - if (!drm_dev_enter(&adev->ddev, &idx)) { + if (!drm_dev_enter(adev_to_drm(adev), &idx)) { DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s", __func__, s_job->sched->name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 7e45640fbee0..651c7abfde03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -340,28 +340,35 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, case AMDGPU_INFO_FW_TA: switch (query_fw->index) { case TA_FW_TYPE_PSP_XGMI: - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.xgmi.feature_version; + fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.xgmi_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_RAS: - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.ras.feature_version; + fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.ras_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_HDCP: - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.hdcp.feature_version; + fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.hdcp_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_DTM: - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.dtm.feature_version; + fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.dtm_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_RAP: - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.rap.feature_version; + fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version; + fw_info->feature = adev->psp.rap_context.context + .bin_desc.feature_version; break; case TA_FW_TYPE_PSP_SECUREDISPLAY: - fw_info->ver = adev->psp.ta_fw_version; - fw_info->feature = adev->psp.securedisplay.feature_version; + fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version; + fw_info->feature = + adev->psp.securedisplay_context.context.bin_desc + .feature_version; break; default: return -EINVAL; @@ -378,8 +385,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->feature = adev->psp.sos.feature_version; break; case AMDGPU_INFO_FW_ASD: - fw_info->ver = adev->psp.asd.fw_version; - fw_info->feature = adev->psp.asd.feature_version; + fw_info->ver = adev->psp.asd_context.bin_desc.fw_version; + fw_info->feature = adev->psp.asd_context.bin_desc.feature_version; break; case AMDGPU_INFO_FW_DMCU: fw_info->ver = adev->dm.dmcu_fw_version; @@ -1416,6 +1423,8 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; struct atom_context *ctx = adev->mode_info.atom_context; + uint8_t smu_minor, smu_debug; + uint16_t smu_major; int ret, 
i; static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = { @@ -1561,8 +1570,11 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); if (ret) return ret; - seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n", - fw_info.feature, fw_info.ver); + smu_major = (fw_info.ver >> 16) & 0xffff; + smu_minor = (fw_info.ver >> 8) & 0xff; + smu_debug = (fw_info.ver >> 0) & 0xff; + seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n", + fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug); /* SDMA */ query_fw.fw_type = AMDGPU_INFO_FW_SDMA; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index a2d3dbbf7d25..ce538f4819f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -31,7 +31,7 @@ void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr, unsigned long *error_count) { - uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4); + uint64_t mc_status = RREG64_PCIE(mc_status_addr); if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) @@ -42,7 +42,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr, unsigned long *error_count) { - uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4); + uint64_t mc_status = RREG64_PCIE(mc_status_addr); if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || @@ -56,7 +56,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev, void amdgpu_mca_reset_error_count(struct amdgpu_device *adev, uint64_t mc_status_addr) { - WREG64_PCIE(mc_status_addr * 4, 0x0ULL); + WREG64_PCIE(mc_status_addr, 0x0ULL); } void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, @@ -87,8 +87,8 @@ int amdgpu_mca_ras_late_init(struct amdgpu_device *adev, if (!mca_dev->ras_if) return -ENOMEM; mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block; + mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block; mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - mca_dev->ras_if->sub_block_index = 0; } ih_info.head = fs_info.head = *mca_dev->ras_if; r = amdgpu_ras_late_init(adev, mca_dev->ras_if, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index f860f2f0e296..c74bc7177066 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -29,6 +29,7 @@ struct amdgpu_mca_ras_funcs { void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); uint32_t ras_block; + uint32_t ras_sub_block; const char* sysfs_name; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 89fb372ed49c..6043bf6fd414 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -232,8 +232,6 @@ struct amdgpu_i2c_chan { struct mutex mutex; }; -struct amdgpu_fbdev; - struct amdgpu_afmt { bool enabled; int offset; @@ -309,13 +307,6 @@ struct amdgpu_framebuffer { uint64_t address; }; -struct amdgpu_fbdev { - struct drm_fb_helper helper; - struct amdgpu_framebuffer rfb; - struct list_head fbdev_list; - struct amdgpu_device *adev; -}; - struct amdgpu_mode_info { struct atom_context *atom_context; 
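
Returning to the SMC firmware reporting change above: the raw 32-bit version splits into major.minor.debug fields. A standalone re-computation of that decoding, with an illustrative raw value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t ver = 0x00362900;              /* illustrative raw value */
        uint16_t smu_major = (ver >> 16) & 0xffff;
        uint8_t smu_minor = (ver >> 8) & 0xff;
        uint8_t smu_debug = ver & 0xff;

        /* prints: 0x00362900 -> 54.41.0 */
        printf("0x%08x -> %d.%d.%d\n", ver, smu_major, smu_minor, smu_debug);
        return 0;
}
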
struct card_info *atom_card_info; @@ -341,8 +332,6 @@ struct amdgpu_mode_info { struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; - /* pointer to fbdev info structure */ - struct amdgpu_fbdev *rfbdev; /* firmware flags */ u32 firmware_flags; /* pointer to backlight encoder */ @@ -631,15 +620,6 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); -/* fbdev layer */ -int amdgpu_fbdev_init(struct amdgpu_device *adev); -void amdgpu_fbdev_fini(struct amdgpu_device *adev); -void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); -int amdgpu_fbdev_total_size(struct amdgpu_device *adev); -bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); - -int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled); - /* amdgpu_display.c */ void amdgpu_display_print_display_setup(struct drm_device *dev); int amdgpu_display_modeset_create_props(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 01a78c786536..3a7b56e57cec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -695,40 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev, } /** - * amdgpu_bo_validate - validate an &amdgpu_bo buffer object - * @bo: pointer to the buffer object - * - * Sets placement according to domain; and changes placement and caching - * policy of the buffer object according to the placement. - * This is used for validating shadow bos. It calls ttm_bo_validate() to - * make sure the buffer is resident where it needs to be. - * - * Returns: - * 0 for success or a negative error code on failure. - */ -int amdgpu_bo_validate(struct amdgpu_bo *bo) -{ - struct ttm_operation_ctx ctx = { false, false }; - uint32_t domain; - int r; - - if (bo->tbo.pin_count) - return 0; - - domain = bo->preferred_domains; - -retry: - amdgpu_bo_placement_from_domain(bo, domain); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { - domain = bo->allowed_domains; - goto retry; - } - - return r; -} - -/** * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list * * @vmbo: BO that will be inserted into the shadow list @@ -1038,29 +1004,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) } } -/** - * amdgpu_bo_evict_vram - evict VRAM buffers - * @adev: amdgpu device object - * - * Evicts all VRAM buffers on the lru list of the memory type. - * Mainly used for evicting vram at suspend time. - * - * Returns: - * 0 for success or a negative error code on failure. 
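
On the removed amdgpu_bo_validate() above (per its kerneldoc, used for validating shadow BOs): its body was a compact domain-fallback retry worth spelling out. Fragment of the deleted logic:

        /* Try the preferred placement first; only on -ENOMEM widen to
         * the full allowed_domains mask and try once more. */
        domain = bo->preferred_domains;
retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }
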
- */ -int amdgpu_bo_evict_vram(struct amdgpu_device *adev) -{ - struct ttm_resource_manager *man; - - if (adev->in_s3 && (adev->flags & AMD_IS_APU)) { - /* No need to evict vram on APUs for suspend to ram */ - return 0; - } - - man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); - return ttm_resource_manager_evict_all(&adev->mman.bdev, man); -} - static const char *amdgpu_vram_names[] = { "UNKNOWN", "GDDR1", @@ -1089,9 +1032,14 @@ int amdgpu_bo_init(struct amdgpu_device *adev) /* On A+A platform, VRAM can be mapped as WB */ if (!adev->gmc.xgmi.connected_to_cpu) { /* reserve PAT memory space to WC for VRAM */ - arch_io_reserve_memtype_wc(adev->gmc.aper_base, + int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); + if (r) { + DRM_ERROR("Unable to set WC memtype for the aperture base\n"); + return r; + } + /* Add an MTRR for the VRAM */ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base, adev->gmc.aper_size); @@ -1331,7 +1279,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) abo = ttm_to_amdgpu_bo(bo); if (abo->kfd_bo) - amdgpu_amdkfd_unreserve_memory_limit(abo); + amdgpu_amdkfd_release_notify(abo); /* We only remove the fence if the resv has individualized. */ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 9d6c001c15f8..4c9cbdc66995 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -304,7 +304,6 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain); int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, u64 min_offset, u64 max_offset); void amdgpu_bo_unpin(struct amdgpu_bo *bo); -int amdgpu_bo_evict_vram(struct amdgpu_device *adev); int amdgpu_bo_init(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev); int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags); @@ -327,7 +326,6 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); -int amdgpu_bo_validate(struct amdgpu_bo *bo); void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, uint64_t *gtt_mem, uint64_t *cpu_mem); void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 9b41cb8c3de5..c641f84649d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -46,6 +46,10 @@ static int psp_sysfs_init(struct amdgpu_device *adev); static void psp_sysfs_fini(struct amdgpu_device *adev); static int psp_load_smu_fw(struct psp_context *psp); +static int psp_ta_unload(struct psp_context *psp, struct ta_context *context); +static int psp_ta_load(struct psp_context *psp, struct ta_context *context); +static int psp_rap_terminate(struct psp_context *psp); +static int psp_securedisplay_terminate(struct psp_context *psp); /* * Due to DF Cstate management centralized to PMFW, the firmware @@ -61,23 +65,32 @@ static int psp_load_smu_fw(struct psp_context *psp); * * This new sequence is required for * - Arcturus and onwards - * - Navi12 and onwards */ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - psp->pmfw_centralized_cstate_management = false; - - if 
-		return;
-
-	if (adev->flags & AMD_IS_APU)
+	if (amdgpu_sriov_vf(adev)) {
+		psp->pmfw_centralized_cstate_management = false;
 		return;
+	}
 
-	if ((adev->asic_type >= CHIP_ARCTURUS) ||
-	    (adev->asic_type >= CHIP_NAVI12))
+	switch (adev->ip_versions[MP0_HWIP][0]) {
+	case IP_VERSION(11, 0, 0):
+	case IP_VERSION(11, 0, 4):
+	case IP_VERSION(11, 0, 5):
+	case IP_VERSION(11, 0, 7):
+	case IP_VERSION(11, 0, 9):
+	case IP_VERSION(11, 0, 11):
+	case IP_VERSION(11, 0, 12):
+	case IP_VERSION(11, 0, 13):
+	case IP_VERSION(13, 0, 2):
 		psp->pmfw_centralized_cstate_management = true;
+		break;
+	default:
+		psp->pmfw_centralized_cstate_management = false;
+		break;
+	}
 }
 
 static int psp_early_init(void *handle)
@@ -85,43 +98,45 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct psp_context *psp = &adev->psp;
 
-	switch (adev->asic_type) {
-	case CHIP_VEGA10:
-	case CHIP_VEGA12:
+	switch (adev->ip_versions[MP0_HWIP][0]) {
+	case IP_VERSION(9, 0, 0):
 		psp_v3_1_set_psp_funcs(psp);
 		psp->autoload_supported = false;
 		break;
-	case CHIP_RAVEN:
+	case IP_VERSION(10, 0, 0):
+	case IP_VERSION(10, 0, 1):
 		psp_v10_0_set_psp_funcs(psp);
 		psp->autoload_supported = false;
 		break;
-	case CHIP_VEGA20:
-	case CHIP_ARCTURUS:
+	case IP_VERSION(11, 0, 2):
+	case IP_VERSION(11, 0, 4):
 		psp_v11_0_set_psp_funcs(psp);
 		psp->autoload_supported = false;
 		break;
-	case CHIP_NAVI10:
-	case CHIP_NAVI14:
-	case CHIP_NAVI12:
-	case CHIP_SIENNA_CICHLID:
-	case CHIP_NAVY_FLOUNDER:
-	case CHIP_VANGOGH:
-	case CHIP_DIMGREY_CAVEFISH:
-	case CHIP_BEIGE_GOBY:
+	case IP_VERSION(11, 0, 0):
+	case IP_VERSION(11, 0, 5):
+	case IP_VERSION(11, 0, 9):
+	case IP_VERSION(11, 0, 7):
+	case IP_VERSION(11, 0, 11):
+	case IP_VERSION(11, 5, 0):
+	case IP_VERSION(11, 0, 12):
+	case IP_VERSION(11, 0, 13):
 		psp_v11_0_set_psp_funcs(psp);
 		psp->autoload_supported = true;
 		break;
-	case CHIP_RENOIR:
+	case IP_VERSION(11, 0, 3):
+	case IP_VERSION(12, 0, 1):
 		psp_v12_0_set_psp_funcs(psp);
 		break;
-	case CHIP_ALDEBARAN:
+	case IP_VERSION(13, 0, 2):
 		psp_v13_0_set_psp_funcs(psp);
 		break;
-	case CHIP_YELLOW_CARP:
+	case IP_VERSION(13, 0, 1):
+	case IP_VERSION(13, 0, 3):
 		psp_v13_0_set_psp_funcs(psp);
 		psp->autoload_supported = true;
 		break;
-	case CHIP_CYAN_SKILLFISH:
+	case IP_VERSION(11, 0, 8):
 		if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
 			psp_v11_0_8_set_psp_funcs(psp);
 			psp->autoload_supported = false;
@@ -264,7 +279,8 @@ static int psp_sw_init(void *handle)
 			DRM_ERROR("Failed to load psp firmware!\n");
 			return ret;
 		}
-	} else if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_ALDEBARAN) {
+	} else if (amdgpu_sriov_vf(adev) &&
+		   adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2)) {
 		ret = psp_init_ta_microcode(psp, "aldebaran");
 		if (ret) {
 			DRM_ERROR("Failed to initialize ta microcode!\n");
@@ -307,7 +323,8 @@
 		}
 	}
 
-	if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
+	if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
+	    adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) {
 		ret = psp_sysfs_init(adev);
 		if (ret) {
 			return ret;
@@ -337,8 +354,8 @@ static int psp_sw_fini(void *handle)
 		psp->ta_fw = NULL;
 	}
 
-	if (adev->asic_type == CHIP_NAVI10 ||
-	    adev->asic_type == CHIP_SIENNA_CICHLID)
+	if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
+	    adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
 		psp_sysfs_fini(adev);
 
 	kfree(cmd);
@@ -424,7 +441,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	if
(psp->adev->no_hw_access) return 0; - if (!drm_dev_enter(&psp->adev->ddev, &idx)) + if (!drm_dev_enter(adev_to_drm(psp->adev), &idx)) return 0; memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); @@ -597,10 +614,10 @@ static int psp_tmr_init(struct psp_context *psp) static bool psp_skip_tmr(struct psp_context *psp) { - switch (psp->adev->asic_type) { - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_ALDEBARAN: + switch (psp->adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(13, 0, 2): return true; default: return false; @@ -778,46 +795,29 @@ static int psp_rl_load(struct amdgpu_device *adev) return ret; } -static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, - uint64_t asd_mc, uint32_t size) +static int psp_asd_load(struct psp_context *psp) { - cmd->cmd_id = GFX_CMD_ID_LOAD_ASD; - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc); - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc); - cmd->cmd.cmd_load_ta.app_len = size; - - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0; - cmd->cmd.cmd_load_ta.cmd_buf_len = 0; + return psp_ta_load(psp, &psp->asd_context); } -static int psp_asd_load(struct psp_context *psp) +static int psp_asd_initialize(struct psp_context *psp) { int ret; - struct psp_gfx_cmd_resp *cmd; /* If PSP version doesn't match ASD version, asd loading will be failed. * add workaround to bypass it for sriov now. * TODO: add version check to make it common */ - if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes) + if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) return 0; - cmd = acquire_psp_cmd_buf(psp); - - psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes); - - psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, - psp->asd.size_bytes); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - if (!ret) { - psp->asd_context.asd_initialized = true; - psp->asd_context.session_id = cmd->resp.session_id; - } + psp->asd_context.mem_context.shared_mc_addr = 0; + psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; + psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; - release_psp_cmd_buf(psp); + ret = psp_asd_load(psp); + if (!ret) + psp->asd_context.initialized = true; return ret; } @@ -829,27 +829,39 @@ static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, cmd->cmd.cmd_unload_ta.session_id = session_id; } +static int psp_ta_unload(struct psp_context *psp, struct ta_context *context) +{ + int ret; + struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); + + psp_prep_ta_unload_cmd_buf(cmd, context->session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + release_psp_cmd_buf(psp); + + return ret; +} + static int psp_asd_unload(struct psp_context *psp) { + return psp_ta_unload(psp, &psp->asd_context); +} + +static int psp_asd_terminate(struct psp_context *psp) +{ int ret; - struct psp_gfx_cmd_resp *cmd; if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->asd_context.asd_initialized) + if (!psp->asd_context.initialized) return 0; - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); + ret = psp_asd_unload(psp); - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); if (!ret) - psp->asd_context.asd_initialized = false; - - release_psp_cmd_buf(psp); + psp->asd_context.initialized = false; return ret; } @@ -885,23 +897,22 @@ int psp_reg_program(struct psp_context 
*psp, enum psp_reg_prog_id reg, static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, uint64_t ta_bin_mc, - uint32_t ta_bin_size, - uint64_t ta_shared_mc, - uint32_t ta_shared_size) + struct ta_context *context) { - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd_id = context->ta_load_type; cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc); cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); - cmd->cmd.cmd_load_ta.app_len = ta_bin_size; + cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes; - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); - cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = + lower_32_bits(context->mem_context.shared_mc_addr); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = + upper_32_bits(context->mem_context.shared_mc_addr); + cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size; } static int psp_ta_init_shared_buf(struct psp_context *psp, - struct ta_mem_context *mem_ctx, - uint32_t shared_mem_size) + struct ta_mem_context *mem_ctx) { int ret; @@ -909,8 +920,8 @@ static int psp_ta_init_shared_buf(struct psp_context *psp, * Allocate 16k memory aligned to 4k from Frame Buffer (local * physical) for ta to host memory */ - ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, + ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, &mem_ctx->shared_buf); @@ -926,8 +937,7 @@ static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) static int psp_xgmi_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context, - PSP_XGMI_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context); } static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, @@ -941,12 +951,12 @@ static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, static int psp_ta_invoke(struct psp_context *psp, uint32_t ta_cmd_id, - uint32_t session_id) + struct ta_context *context) { int ret; struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); + psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); @@ -956,31 +966,23 @@ static int psp_ta_invoke(struct psp_context *psp, return ret; } -static int psp_xgmi_load(struct psp_context *psp) +static int psp_ta_load(struct psp_context *psp, struct ta_context *context) { int ret; struct psp_gfx_cmd_resp *cmd; - /* - * TODO: bypass the loading in sriov for now - */ - cmd = acquire_psp_cmd_buf(psp); - psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes); + psp_copy_fw(psp, context->bin_desc.start_addr, + context->bin_desc.size_bytes); - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->xgmi.size_bytes, - psp->xgmi_context.context.mem_context.shared_mc_addr, - PSP_XGMI_SHARED_MEM_SIZE); + psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); if (!ret) { - psp->xgmi_context.context.initialized = true; - psp->xgmi_context.context.session_id = cmd->resp.session_id; + context->session_id = cmd->resp.session_id; } release_psp_cmd_buf(psp); @@ -988,41 +990,31 @@ static int psp_xgmi_load(struct 
psp_context *psp) return ret; } -static int psp_xgmi_unload(struct psp_context *psp) +static int psp_xgmi_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - struct amdgpu_device *adev = psp->adev; - - /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ - if (adev->asic_type == CHIP_ARCTURUS || - (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu)) - return 0; - - /* - * TODO: bypass the unloading in sriov for now - */ - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); + return psp_ta_load(psp, &psp->xgmi_context.context); +} - return ret; +static int psp_xgmi_unload(struct psp_context *psp) +{ + return psp_ta_unload(psp, &psp->xgmi_context.context); } int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) { - return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.context.session_id); + return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context); } int psp_xgmi_terminate(struct psp_context *psp) { int ret; + struct amdgpu_device *adev = psp->adev; + + /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */ + if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || + (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && + adev->gmc.xgmi.connected_to_cpu)) + return 0; if (!psp->xgmi_context.context.initialized) return 0; @@ -1045,13 +1037,16 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo int ret; if (!psp->ta_fw || - !psp->xgmi.size_bytes || - !psp->xgmi.start_addr) + !psp->xgmi_context.context.bin_desc.size_bytes || + !psp->xgmi_context.context.bin_desc.start_addr) return -ENOENT; if (!load_ta) goto invoke; + psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE; + psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->xgmi_context.context.initialized) { ret = psp_xgmi_init_shared_buf(psp); if (ret) @@ -1060,7 +1055,9 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo /* Load XGMI TA */ ret = psp_xgmi_load(psp); - if (ret) + if (!ret) + psp->xgmi_context.context.initialized = true; + else return ret; invoke: @@ -1117,8 +1114,8 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp) { - return psp->adev->asic_type == CHIP_ALDEBARAN && - psp->xgmi.feature_version >= 0x2000000b; + return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) && + psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b; } /* @@ -1282,80 +1279,40 @@ int psp_xgmi_set_topology_info(struct psp_context *psp, // ras begin static int psp_ras_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context, - PSP_RAS_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context); } static int psp_ras_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - struct ta_ras_shared_memory *ras_cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes); - - ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; - - if (psp->adev->gmc.xgmi.connected_to_cpu) - 
ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; - else - ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->ras.size_bytes, - psp->ras_context.context.mem_context.shared_mc_addr, - PSP_RAS_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); - - if (!ret) { - psp->ras_context.context.session_id = cmd->resp.session_id; - - if (!ras_cmd->ras_status) - psp->ras_context.context.initialized = true; - else - dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); - } - - release_psp_cmd_buf(psp); - - if (ret || ras_cmd->ras_status) - amdgpu_ras_fini(psp->adev); - - return ret; + return psp_ta_load(psp, &psp->ras_context.context); } static int psp_ras_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, - psp->fence_buf_mc_addr); + return psp_ta_unload(psp, &psp->ras_context.context); +} - release_psp_cmd_buf(psp); +static void psp_ras_ta_check_status(struct psp_context *psp) +{ + struct ta_ras_shared_memory *ras_cmd = + (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; - return ret; + switch (ras_cmd->ras_status) { + case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP: + dev_warn(psp->adev->dev, + "RAS WARNING: cmd failed due to unsupported ip\n"); + break; + case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: + dev_warn(psp->adev->dev, + "RAS WARNING: cmd failed due to unsupported error injection\n"); + break; + case TA_RAS_STATUS__SUCCESS: + break; + default: + dev_warn(psp->adev->dev, + "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); + break; + } } int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) @@ -1371,7 +1328,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) if (amdgpu_sriov_vf(psp->adev)) return 0; - ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras_context.context.session_id); + ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context); if (amdgpu_ras_intr_triggered()) return ret; @@ -1391,31 +1348,8 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) dev_warn(psp->adev->dev, "RAS internal register access blocked\n"); - } - - return ret; -} -static int psp_ras_status_to_errno(struct amdgpu_device *adev, - enum ta_ras_status ras_status) -{ - int ret = -EINVAL; - - switch (ras_status) { - case TA_RAS_STATUS__SUCCESS: - ret = 0; - break; - case TA_RAS_STATUS__RESET_NEEDED: - ret = -EAGAIN; - break; - case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE: - dev_warn(adev->dev, "RAS WARN: ras function unavailable\n"); - break; - case TA_RAS_STATUS__ERROR_ASD_READ_WRITE: - dev_warn(adev->dev, "RAS WARN: asd read or write failed\n"); - break; - default: - dev_err(adev->dev, "RAS ERROR: ras function failed ret 0x%X\n", ret); + psp_ras_ta_check_status(psp); } return ret; @@ -1444,7 +1378,7 @@ int psp_ras_enable_features(struct psp_context *psp, if (ret) return -EINVAL; - return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status); + return 0; } static int psp_ras_terminate(struct psp_context *psp) @@ -1477,6 +1411,7 @@ static int psp_ras_initialize(struct psp_context *psp) int ret; uint32_t boot_cfg = 0xFF; struct amdgpu_device 
*adev = psp->adev; + struct ta_ras_shared_memory *ras_cmd; /* * TODO: bypass the initialize in sriov for now @@ -1484,8 +1419,8 @@ static int psp_ras_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(adev)) return 0; - if (!adev->psp.ras.size_bytes || - !adev->psp.ras.start_addr) { + if (!adev->psp.ras_context.context.bin_desc.size_bytes || + !adev->psp.ras_context.context.bin_desc.start_addr) { dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); return 0; } @@ -1531,17 +1466,34 @@ static int psp_ras_initialize(struct psp_context *psp) } } + psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE; + psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->ras_context.context.initialized) { ret = psp_ras_init_shared_buf(psp); if (ret) return ret; } + ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf; + memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory)); + + if (amdgpu_ras_is_poison_mode_supported(adev)) + ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; + if (!adev->gmc.xgmi.connected_to_cpu) + ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; + ret = psp_ras_load(psp); - if (ret) - return ret; - return 0; + if (!ret && !ras_cmd->ras_status) + psp->ras_context.context.initialized = true; + else { + if (ras_cmd->ras_status) + dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status); + amdgpu_ras_fini(psp->adev); + } + + return ret; } int psp_ras_trigger_error(struct psp_context *psp, @@ -1568,51 +1520,24 @@ int psp_ras_trigger_error(struct psp_context *psp, if (amdgpu_ras_intr_triggered()) return 0; - return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status); + if (ras_cmd->ras_status) + return -EINVAL; + + return 0; } // ras end // HDCP start static int psp_hdcp_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context, - PSP_HDCP_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context); } static int psp_hdcp_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - psp_copy_fw(psp, psp->hdcp.start_addr, - psp->hdcp.size_bytes); - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->hdcp.size_bytes, - psp->hdcp_context.context.mem_context.shared_mc_addr, - PSP_HDCP_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->hdcp_context.context.initialized = true; - psp->hdcp_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->hdcp_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->hdcp_context.context); } + static int psp_hdcp_initialize(struct psp_context *psp) { int ret; @@ -1623,12 +1548,15 @@ static int psp_hdcp_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->hdcp.size_bytes || - !psp->hdcp.start_addr) { + if (!psp->hdcp_context.context.bin_desc.size_bytes || + !psp->hdcp_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n"); return 0; } + psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE; + psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->hdcp_context.context.initialized) { ret = psp_hdcp_init_shared_buf(psp); if (ret) @@ 
-1636,32 +1564,17 @@ static int psp_hdcp_initialize(struct psp_context *psp) } ret = psp_hdcp_load(psp); - if (ret) - return ret; + if (!ret) { + psp->hdcp_context.context.initialized = true; + mutex_init(&psp->hdcp_context.mutex); + } - return 0; + return ret; } static int psp_hdcp_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->hdcp_context.context); } int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) @@ -1672,7 +1585,7 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) if (amdgpu_sriov_vf(psp->adev)) return 0; - return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.context.session_id); + return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context); } static int psp_hdcp_terminate(struct psp_context *psp) @@ -1709,42 +1622,12 @@ out: // DTM start static int psp_dtm_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context, - PSP_DTM_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context); } static int psp_dtm_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the loading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes); - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->dtm.size_bytes, - psp->dtm_context.context.mem_context.shared_mc_addr, - PSP_DTM_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->dtm_context.context.initialized = true; - psp->dtm_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->dtm_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->dtm_context.context); } static int psp_dtm_initialize(struct psp_context *psp) @@ -1757,12 +1640,15 @@ static int psp_dtm_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->dtm.size_bytes || - !psp->dtm.start_addr) { + if (!psp->dtm_context.context.bin_desc.size_bytes || + !psp->dtm_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n"); return 0; } + psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE; + psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->dtm_context.context.initialized) { ret = psp_dtm_init_shared_buf(psp); if (ret) @@ -1770,32 +1656,17 @@ static int psp_dtm_initialize(struct psp_context *psp) } ret = psp_dtm_load(psp); - if (ret) - return ret; + if (!ret) { + psp->dtm_context.context.initialized = true; + mutex_init(&psp->dtm_context.mutex); + } - return 0; + return ret; } static int psp_dtm_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - /* - * TODO: bypass the unloading in sriov for now - */ - if (amdgpu_sriov_vf(psp->adev)) - return 0; - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - 
release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->dtm_context.context); } int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) @@ -1806,7 +1677,7 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) if (amdgpu_sriov_vf(psp->adev)) return 0; - return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.context.session_id); + return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context); } static int psp_dtm_terminate(struct psp_context *psp) @@ -1843,50 +1714,17 @@ out: // RAP start static int psp_rap_init_shared_buf(struct psp_context *psp) { - return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context, - PSP_RAP_SHARED_MEM_SIZE); + return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context); } static int psp_rap_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd; - - psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes); - - cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->rap.size_bytes, - psp->rap_context.context.mem_context.shared_mc_addr, - PSP_RAP_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->rap_context.context.initialized = true; - psp->rap_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->rap_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->rap_context.context); } static int psp_rap_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->rap_context.context); } static int psp_rap_initialize(struct psp_context *psp) @@ -1900,12 +1738,15 @@ static int psp_rap_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->rap.size_bytes || - !psp->rap.start_addr) { + if (!psp->rap_context.context.bin_desc.size_bytes || + !psp->rap_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n"); return 0; } + psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE; + psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->rap_context.context.initialized) { ret = psp_rap_init_shared_buf(psp); if (ret) @@ -1913,16 +1754,15 @@ static int psp_rap_initialize(struct psp_context *psp) } ret = psp_rap_load(psp); - if (ret) + if (!ret) { + psp->rap_context.context.initialized = true; + mutex_init(&psp->rap_context.mutex); + } else return ret; ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status); if (ret || status != TA_RAP_STATUS__SUCCESS) { - psp_rap_unload(psp); - - psp_ta_free_shared_buf(&psp->rap_context.context.mem_context); - - psp->rap_context.context.initialized = false; + psp_rap_terminate(psp); dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n", ret, status); @@ -1971,7 +1811,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat rap_cmd->cmd_id = ta_cmd_id; rap_cmd->validation_method_id = METHOD_A; - ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.context.session_id); + ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context); if (ret) goto out_unlock; @@ -1989,49 +1829,17 @@ out_unlock: static int psp_securedisplay_init_shared_buf(struct 
psp_context *psp) { return psp_ta_init_shared_buf( - psp, &psp->securedisplay_context.context.mem_context, - PSP_SECUREDISPLAY_SHARED_MEM_SIZE); + psp, &psp->securedisplay_context.context.mem_context); } static int psp_securedisplay_load(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes); - - psp_prep_ta_load_cmd_buf(cmd, - psp->fw_pri_mc_addr, - psp->securedisplay.size_bytes, - psp->securedisplay_context.context.mem_context.shared_mc_addr, - PSP_SECUREDISPLAY_SHARED_MEM_SIZE); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - if (!ret) { - psp->securedisplay_context.context.initialized = true; - psp->securedisplay_context.context.session_id = cmd->resp.session_id; - mutex_init(&psp->securedisplay_context.mutex); - } - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_load(psp, &psp->securedisplay_context.context); } static int psp_securedisplay_unload(struct psp_context *psp) { - int ret; - struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp); - - psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id); - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - - release_psp_cmd_buf(psp); - - return ret; + return psp_ta_unload(psp, &psp->securedisplay_context.context); } static int psp_securedisplay_initialize(struct psp_context *psp) @@ -2045,12 +1853,16 @@ static int psp_securedisplay_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev)) return 0; - if (!psp->securedisplay.size_bytes || - !psp->securedisplay.start_addr) { + if (!psp->securedisplay_context.context.bin_desc.size_bytes || + !psp->securedisplay_context.context.bin_desc.start_addr) { dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n"); return 0; } + psp->securedisplay_context.context.mem_context.shared_mem_size = + PSP_SECUREDISPLAY_SHARED_MEM_SIZE; + psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA; + if (!psp->securedisplay_context.context.initialized) { ret = psp_securedisplay_init_shared_buf(psp); if (ret) @@ -2058,7 +1870,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp) } ret = psp_securedisplay_load(psp); - if (ret) + if (!ret) { + psp->securedisplay_context.context.initialized = true; + mutex_init(&psp->securedisplay_context.mutex); + } else return ret; psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, @@ -2066,12 +1881,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp) ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); if (ret) { - psp_securedisplay_unload(psp); - - psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context); - - psp->securedisplay_context.context.initialized = false; - + psp_securedisplay_terminate(psp); dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n"); return -EINVAL; } @@ -2123,7 +1933,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id) mutex_lock(&psp->securedisplay_context.mutex); - ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.context.session_id); + ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context); mutex_unlock(&psp->securedisplay_context.mutex); @@ -2443,8 +2253,8 @@ static int psp_load_smu_fw(struct psp_context *psp) if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled && - (adev->asic_type == CHIP_ARCTURUS || - 
adev->asic_type == CHIP_VEGA20))) { + (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) || + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) { ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); if (ret) { DRM_WARN("Failed to set MP1 state prepare for reload\n"); @@ -2541,8 +2351,9 @@ static int psp_load_non_psp_fw(struct psp_context *psp) continue; if (psp->autoload_supported && - (adev->asic_type >= CHIP_SIENNA_CICHLID && - adev->asic_type <= CHIP_DIMGREY_CAVEFISH) && + (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) || + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) || + adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) && (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3)) @@ -2629,7 +2440,7 @@ skip_memalloc: if (ret) goto failed; - ret = psp_asd_load(psp); + ret = psp_asd_initialize(psp); if (ret) { DRM_ERROR("PSP load asd failed!\n"); return ret; @@ -2721,7 +2532,7 @@ static int psp_hw_fini(void *handle) psp_hdcp_terminate(psp); } - psp_asd_unload(psp); + psp_asd_terminate(psp); psp_tmr_terminate(psp); psp_ring_destroy(psp, PSP_RING_TYPE__KM); @@ -2779,9 +2590,9 @@ static int psp_suspend(void *handle) } } - ret = psp_asd_unload(psp); + ret = psp_asd_terminate(psp); if (ret) { - DRM_ERROR("Failed to unload asd\n"); + DRM_ERROR("Failed to terminate asd\n"); return ret; } @@ -2826,12 +2637,18 @@ static int psp_resume(void *handle) if (ret) goto failed; - ret = psp_asd_load(psp); + ret = psp_asd_initialize(psp); if (ret) { DRM_ERROR("PSP load asd failed!\n"); goto failed; } + ret = psp_rl_load(adev); + if (ret) { + dev_err(adev->dev, "PSP load RL failed!\n"); + goto failed; + } + if (adev->gmc.xgmi.num_physical_nodes > 1) { ret = psp_xgmi_initialize(psp, false, true); /* Warning the XGMI seesion initialize failure @@ -2994,10 +2811,10 @@ int psp_init_asd_microcode(struct psp_context *psp, goto out; asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; - adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); - adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); - adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); - adev->psp.asd.start_addr = (uint8_t *)asd_hdr + + adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version); + adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version); + adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes); + adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); return 0; out: @@ -3129,7 +2946,8 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev) ucode_array_start_addr = (uint8_t *)sos_hdr + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); - if (adev->gmc.xgmi.connected_to_cpu || (adev->asic_type != CHIP_ALDEBARAN)) { + if (adev->gmc.xgmi.connected_to_cpu || + (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) { adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version); adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version); @@ -3284,40 +3102,43 @@ static int parse_ta_bin_descriptor(struct psp_context *psp, switch (desc->fw_type) { case TA_FW_TYPE_PSP_ASD: - psp->asd.fw_version = le32_to_cpu(desc->fw_version); - psp->asd.feature_version = le32_to_cpu(desc->fw_version); - psp->asd.size_bytes = le32_to_cpu(desc->size_bytes); - psp->asd.start_addr = 
ucode_start_addr;
+		psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+		psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+		psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->asd_context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_XGMI:
-		psp->xgmi.feature_version = le32_to_cpu(desc->fw_version);
-		psp->xgmi.size_bytes = le32_to_cpu(desc->size_bytes);
-		psp->xgmi.start_addr = ucode_start_addr;
+		psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+		psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_RAS:
-		psp->ras.feature_version = le32_to_cpu(desc->fw_version);
-		psp->ras.size_bytes = le32_to_cpu(desc->size_bytes);
-		psp->ras.start_addr = ucode_start_addr;
+		psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+		psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_HDCP:
-		psp->hdcp.feature_version = le32_to_cpu(desc->fw_version);
-		psp->hdcp.size_bytes = le32_to_cpu(desc->size_bytes);
-		psp->hdcp.start_addr = ucode_start_addr;
+		psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+		psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_DTM:
-		psp->dtm.feature_version = le32_to_cpu(desc->fw_version);
-		psp->dtm.size_bytes = le32_to_cpu(desc->size_bytes);
-		psp->dtm.start_addr = ucode_start_addr;
+		psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+		psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_RAP:
-		psp->rap.feature_version = le32_to_cpu(desc->fw_version);
-		psp->rap.size_bytes = le32_to_cpu(desc->size_bytes);
-		psp->rap.start_addr = ucode_start_addr;
+		psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+		psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+		psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
 		break;
 	case TA_FW_TYPE_PSP_SECUREDISPLAY:
-		psp->securedisplay.feature_version = le32_to_cpu(desc->fw_version);
-		psp->securedisplay.size_bytes = le32_to_cpu(desc->size_bytes);
-		psp->securedisplay.start_addr = ucode_start_addr;
+		psp->securedisplay_context.context.bin_desc.fw_version =
+			le32_to_cpu(desc->fw_version);
+		psp->securedisplay_context.context.bin_desc.size_bytes =
+			le32_to_cpu(desc->size_bytes);
+		psp->securedisplay_context.context.bin_desc.start_addr =
+			ucode_start_addr;
 		break;
 	default:
 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
@@ -3478,7 +3299,7 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size
 {
 	int idx;
 
-	if (!drm_dev_enter(&psp->adev->ddev, &idx))
+	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
 		return;
 
 	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8ef2d28af92a..f29afabbff1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -34,17 +34,20 @@
 #define PSP_FENCE_BUFFER_SIZE	0x1000
 #define PSP_CMD_BUFFER_SIZE	0x1000
-#define PSP_XGMI_SHARED_MEM_SIZE	0x4000
-#define PSP_RAS_SHARED_MEM_SIZE	0x4000
 #define PSP_1_MEG		0x100000
 #define PSP_TMR_SIZE(adev)	((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
-#define PSP_HDCP_SHARED_MEM_SIZE	0x4000
-#define PSP_DTM_SHARED_MEM_SIZE	0x4000
-#define PSP_RAP_SHARED_MEM_SIZE	0x4000
-#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE	0x4000
-#define PSP_SHARED_MEM_SIZE		0x4000
 #define PSP_FW_NAME_LEN		0x24
 
+enum psp_shared_mem_size {
+	PSP_ASD_SHARED_MEM_SIZE = 0x0,
+	PSP_XGMI_SHARED_MEM_SIZE = 0x4000,
+	PSP_RAS_SHARED_MEM_SIZE = 0x4000,
+	PSP_HDCP_SHARED_MEM_SIZE = 0x4000,
+	PSP_DTM_SHARED_MEM_SIZE = 0x4000,
+	PSP_RAP_SHARED_MEM_SIZE = 0x4000,
+	PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000,
+};
+
 struct psp_context;
 struct psp_xgmi_node_info;
 struct psp_xgmi_topology_info;
@@ -131,21 +134,26 @@ struct psp_xgmi_topology_info {
 	struct psp_xgmi_node_info	nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
 };
 
-struct psp_asd_context {
-	bool			asd_initialized;
-	uint32_t		session_id;
+struct psp_bin_desc {
+	uint32_t fw_version;
+	uint32_t feature_version;
+	uint32_t size_bytes;
+	uint8_t *start_addr;
 };
 
 struct ta_mem_context {
 	struct amdgpu_bo		*shared_bo;
 	uint64_t		shared_mc_addr;
 	void			*shared_buf;
+	enum psp_shared_mem_size	shared_mem_size;
 };
 
 struct ta_context {
 	bool			initialized;
 	uint32_t		session_id;
 	struct ta_mem_context	mem_context;
+	struct psp_bin_desc		bin_desc;
+	enum psp_gfx_cmd_id		ta_load_type;
 };
 
 struct ta_cp_context {
@@ -263,13 +271,6 @@ struct psp_runtime_boot_cfg_entry {
 	uint32_t reserved;
 };
 
-struct psp_bin_desc {
-	uint32_t fw_version;
-	uint32_t feature_version;
-	uint32_t size_bytes;
-	uint8_t *start_addr;
-};
-
 struct psp_context
 {
 	struct amdgpu_device		*adev;
@@ -301,7 +302,6 @@ struct psp_context
 
 	/* asd firmware */
 	const struct firmware	*asd_fw;
-	struct psp_bin_desc		asd;
 
 	/* toc firmware */
 	const struct firmware		*toc_fw;
@@ -326,14 +326,8 @@ struct psp_context
 	/* xgmi ta firmware and buffer */
 	const struct firmware		*ta_fw;
 	uint32_t			ta_fw_version;
-	struct psp_bin_desc		xgmi;
-	struct psp_bin_desc		ras;
-	struct psp_bin_desc		hdcp;
-	struct psp_bin_desc		dtm;
-	struct psp_bin_desc		rap;
-	struct psp_bin_desc		securedisplay;
-
-	struct psp_asd_context		asd_context;
+
+	struct ta_context		asd_context;
 	struct psp_xgmi_context		xgmi_context;
 	struct psp_ras_context		ras_context;
 	struct ta_cp_context		hdcp_context;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 96a8fd0ca1df..46910e7b2927 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -35,7 +35,11 @@
 #include "amdgpu_xgmi.h"
 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
 #include "atom.h"
+#ifdef CONFIG_X86_MCE_AMD
+#include <asm/mce.h>
+static bool notifier_registered;
+#endif
 
 static const char *RAS_FS_NAME = "ras";
 
 const char *ras_error_string[] = {
@@ -61,8 +65,30 @@ const char *ras_block_string[] = {
 	"mp0",
 	"mp1",
 	"fuse",
+	"mca",
 };
 
+const char *ras_mca_block_string[] = {
+	"mca_mp0",
+	"mca_mp1",
+	"mca_mpio",
+	"mca_iohc",
+};
+
+const char *get_ras_block_str(struct ras_common_if *ras_block)
+{
+	if (!ras_block)
+		return "NULL";
+
+	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
+		return "OUT OF RANGE";
+
+	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
+		return ras_mca_block_string[ras_block->sub_block_index];
+
+	return ras_block_string[ras_block->block];
+}
+
 #define ras_err_str(i) (ras_error_string[ffs(i)])
 
 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@ -85,6 +111,14 @@ static bool
amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, uint64_t addr); static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, uint64_t addr); +#ifdef CONFIG_X86_MCE_AMD +static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); +struct mce_notifier_adev_list { + struct amdgpu_device *devs[MAX_GPU_INSTANCE]; + int num_gpu; +}; +static struct mce_notifier_adev_list mce_adev_list; +#endif void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) { @@ -187,7 +221,7 @@ static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { *block_id = i; - if (strcmp(name, ras_block_str(i)) == 0) + if (strcmp(name, ras_block_string[i]) == 0) return 0; } return -EINVAL; @@ -509,7 +543,6 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - if (obj->adev->asic_type == CHIP_ALDEBARAN) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) DRM_WARN("Failed to reset error counter and error status"); @@ -529,7 +562,7 @@ static inline void put_obj(struct ras_manager *obj) if (obj && (--obj->use == 0)) list_del(&obj->node); if (obj && (obj->use < 0)) - DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block)); + DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head)); } /* make one obj and return it. */ @@ -545,7 +578,14 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; + + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; + /* already exist. return obj? */ if (alive_obj(obj)) return NULL; @@ -573,19 +613,21 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL; - obj = &con->objs[head->block]; + if (head->block == AMDGPU_RAS_BLOCK__MCA) { + if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) + return NULL; - if (alive_obj(obj)) { - WARN_ON(head->block != obj->head.block); + obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; + } else + obj = &con->objs[head->block]; + + if (alive_obj(obj)) return obj; - } } else { - for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { obj = &con->objs[i]; - if (alive_obj(obj)) { - WARN_ON(i != obj->head.block); + if (alive_obj(obj)) return obj; - } } } @@ -626,8 +668,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, */ if (!amdgpu_ras_is_feature_allowed(adev, head)) return 0; - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) - return 0; if (enable) { if (!obj) { @@ -678,19 +718,14 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, /* Do not enable if it is not allowed. */ WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); - /* Are we alerady in that state we are going to set? */ - if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) { - ret = 0; - goto out; - } if (!amdgpu_ras_intr_triggered()) { ret = psp_ras_enable_features(&adev->psp, info, enable); if (ret) { - dev_err(adev->dev, "ras %s %s failed %d\n", + dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n", enable ? 
"enable":"disable", - ras_block_str(head->block), - ret); + get_ras_block_str(head), + amdgpu_ras_is_poison_mode_supported(adev), ret); goto out; } } @@ -731,7 +766,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, if (!ret) dev_info(adev->dev, "RAS INFO: %s setup object\n", - ras_block_str(head->block)); + get_ras_block_str(head)); } } else { /* setup the object then issue a ras TA disable cmd.*/ @@ -781,17 +816,39 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, bool bypass) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; int i; - const enum amdgpu_ras_error_type default_ras_type = - AMDGPU_RAS_ERROR__NONE; + const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; - for (i = 0; i < ras_block_count; i++) { + for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { struct ras_common_if head = { .block = i, .type = default_ras_type, .sub_block_index = 0, }; + + if (i == AMDGPU_RAS_BLOCK__MCA) + continue; + + if (bypass) { + /* + * bypass psp. vbios enable ras for us. + * so just create the obj + */ + if (__amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } else { + if (amdgpu_ras_feature_enable(adev, &head, 1)) + break; + } + } + + for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { + struct ras_common_if head = { + .block = AMDGPU_RAS_BLOCK__MCA, + .type = default_ras_type, + .sub_block_index = i, + }; + if (bypass) { /* * bypass psp. vbios enable ras for us. @@ -809,6 +866,64 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, } /* feature ctl end */ + +void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_err_data *err_data) +{ + switch (ras_block->sub_block_index) { + case AMDGPU_RAS_MCA_BLOCK__MP0: + if (adev->mca.mp0.ras_funcs && + adev->mca.mp0.ras_funcs->query_ras_error_count) + adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MP1: + if (adev->mca.mp1.ras_funcs && + adev->mca.mp1.ras_funcs->query_ras_error_count) + adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_MCA_BLOCK__MPIO: + if (adev->mca.mpio.ras_funcs && + adev->mca.mpio.ras_funcs->query_ras_error_count) + adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data); + break; + default: + break; + } +} + +static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + int ret = 0; + + /* + * choosing right query method according to + * whether smu support query error information + */ + ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc)); + if (ret == -EOPNOTSUPP) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, err_data); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address) + adev->umc.ras_funcs->query_ras_error_address(adev, err_data); + } else if (!ret) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_count) + adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_address) + adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data); + } +} + /* query/inject/cure begin */ int 
amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) @@ -822,15 +937,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev, &err_data); - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address) - adev->umc.ras_funcs->query_ras_error_address(adev, &err_data); + amdgpu_ras_get_ecc_info(adev, &err_data); break; case AMDGPU_RAS_BLOCK__SDMA: if (adev->sdma.funcs->query_ras_error_count) { @@ -872,6 +979,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->hdp.ras_funcs->query_ras_error_count) adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__MCA: + amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data); + break; default: break; } @@ -893,13 +1003,13 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->smuio.funcs->get_socket_id(adev), adev->smuio.funcs->get_die_id(adev), obj->err_data.ce_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } else { dev_info(adev->dev, "%ld correctable hardware errors " "detected in %s block, no user " "action is needed.\n", obj->err_data.ce_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } } if (err_data.ue_count) { @@ -912,15 +1022,18 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->smuio.funcs->get_socket_id(adev), adev->smuio.funcs->get_die_id(adev), obj->err_data.ue_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } else { dev_info(adev->dev, "%ld uncorrectable hardware errors " "detected in %s block\n", obj->err_data.ue_count, - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); } } + if (!amdgpu_persistent_edc_harvesting_supported(adev)) + amdgpu_ras_reset_error_status(adev, info->head.block); + return 0; } @@ -1027,6 +1140,7 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, case AMDGPU_RAS_BLOCK__SDMA: case AMDGPU_RAS_BLOCK__MMHUB: case AMDGPU_RAS_BLOCK__PCIE_BIF: + case AMDGPU_RAS_BLOCK__MCA: ret = psp_ras_trigger_error(&adev->psp, &block_info); break; case AMDGPU_RAS_BLOCK__XGMI_WAFL: @@ -1034,13 +1148,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, break; default: dev_info(adev->dev, "%s error injection is not supported yet\n", - ras_block_str(info->head.block)); + get_ras_block_str(&info->head)); ret = -EINVAL; } if (ret) dev_err(adev->dev, "ras inject %s failed %d\n", - ras_block_str(info->head.block), ret); + get_ras_block_str(&info->head), ret); return ret; } @@ -1383,7 +1497,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) if (amdgpu_ras_is_supported(adev, obj->head.block) && (obj->attr_inuse == 1)) { sprintf(fs_info.debugfs_name, "%s_err_inject", - ras_block_str(obj->head.block)); + get_ras_block_str(&obj->head)); fs_info.head = obj->head; amdgpu_ras_debugfs_create(adev, &fs_info, dir); } @@ -1469,22 +1583,28 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) data->rptr = (data->aligned_element_size + data->rptr) % data->ring_size; - /* Let IP handle its data, maybe we need get the output - * from the callback to udpate the error type/count, etc - */ if (data->cb) { - ret = data->cb(obj->adev, &err_data, &entry); - /* ue will trigger an interrupt, and 
in that case - * we need do a reset to recovery the whole system. - * But leave IP do that recovery, here we just dispatch - * the error. - */ - if (ret == AMDGPU_RAS_SUCCESS) { - /* these counts could be left as 0 if - * some blocks do not count error number + if (amdgpu_ras_is_poison_mode_supported(obj->adev) && + obj->head.block == AMDGPU_RAS_BLOCK__UMC) + dev_info(obj->adev->dev, + "Poison is created, no user action is needed.\n"); + else { + /* Let IP handle its data, maybe we need get the output + * from the callback to udpate the error type/count, etc */ - obj->err_data.ue_count += err_data.ue_count; - obj->err_data.ce_count += err_data.ce_count; + ret = data->cb(obj->adev, &err_data, &entry); + /* ue will trigger an interrupt, and in that case + * we need do a reset to recovery the whole system. + * But leave IP do that recovery, here we just dispatch + * the error. + */ + if (ret == AMDGPU_RAS_SUCCESS) { + /* these counts could be left as 0 if + * some blocks do not count error number + */ + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + } } } } @@ -1839,9 +1959,11 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) if (!con || !con->eh_data) return 0; + mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; save_count = data->count - control->ras_num_recs; + mutex_unlock(&con->recovery_lock); /* only new entries are saved */ if (save_count > 0) { if (amdgpu_ras_eeprom_append(control, @@ -2014,6 +2136,11 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); } +#ifdef CONFIG_X86_MCE_AMD + if ((adev->asic_type == CHIP_ALDEBARAN) && + (adev->gmc.xgmi.connected_to_cpu)) + amdgpu_register_bad_pages_mca_notifier(adev); +#endif return 0; free: @@ -2056,19 +2183,6 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) } /* recovery end */ -/* return 0 if ras will reset gpu and repost.*/ -int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, - unsigned int block) -{ - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - - if (!ras) - return -EINVAL; - - ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET; - return 0; -} - static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { return adev->asic_type == CHIP_VEGA10 || @@ -2176,12 +2290,14 @@ int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); int r; + bool df_poison, umc_poison; if (con) return 0; con = kmalloc(sizeof(struct amdgpu_ras) + - sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT, + sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + + sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, GFP_KERNEL|__GFP_ZERO); if (!con) return -ENOMEM; @@ -2245,6 +2361,23 @@ int amdgpu_ras_init(struct amdgpu_device *adev) goto release_con; } + /* Init poison supported flag, the default value is false */ + if (adev->df.funcs && + adev->df.funcs->query_ras_poison_mode && + adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_poison_mode) { + df_poison = + adev->df.funcs->query_ras_poison_mode(adev); + umc_poison = + adev->umc.ras_funcs->query_ras_poison_mode(adev); + /* Only poison is set in both DF and UMC, we can support it */ + if (df_poison && umc_poison) + con->poison_supported = true; + else if (df_poison != umc_poison) + dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n", + df_poison, umc_poison); + } + if (amdgpu_ras_fs_init(adev)) { r = 
-EINVAL; goto release_con; @@ -2288,6 +2421,16 @@ static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, return 0; } +bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!con) + return false; + + return con->poison_supported; +} + /* helper function to handle common stuff in ip late init phase */ int amdgpu_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block, @@ -2306,12 +2449,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); if (r) { - if (r == -EAGAIN) { - /* request gpu reset. will run again */ - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - return 0; - } else if (adev->in_suspend || amdgpu_in_reset(adev)) { + if (adev->in_suspend || amdgpu_in_reset(adev)) { /* in resume phase, if fail to enable ras, * clean up all ras fs nodes, and disable ras */ goto cleanup; @@ -2403,19 +2541,6 @@ void amdgpu_ras_resume(struct amdgpu_device *adev) } } } - - if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) { - con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET; - /* setup ras obj state as disabled. - * for init_by_vbios case. - * if we want to enable ras, just enable it in a normal way. - * If we want do disable it, need setup ras obj as enabled, - * then issue another TA disable cmd. - * See feature_enable_on_boot - */ - amdgpu_ras_disable_all_features(adev, 1); - amdgpu_ras_reset_gpu(adev); - } } void amdgpu_ras_suspend(struct amdgpu_device *adev) @@ -2507,3 +2632,136 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev) kfree(con); } } + +#ifdef CONFIG_X86_MCE_AMD +static struct amdgpu_device *find_adev(uint32_t node_id) +{ + int i; + struct amdgpu_device *adev = NULL; + + for (i = 0; i < mce_adev_list.num_gpu; i++) { + adev = mce_adev_list.devs[i]; + + if (adev && adev->gmc.xgmi.connected_to_cpu && + adev->gmc.xgmi.physical_node_id == node_id) + break; + adev = NULL; + } + + return adev; +} + +#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) +#define GET_UMC_INST(m) (((m) >> 21) & 0x7) +#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) +#define GPU_ID_OFFSET 8 + +static int amdgpu_bad_page_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct mce *m = (struct mce *)data; + struct amdgpu_device *adev = NULL; + uint32_t gpu_id = 0; + uint32_t umc_inst = 0; + uint32_t ch_inst, channel_index = 0; + struct ras_err_data err_data = {0, 0, 0, NULL}; + struct eeprom_table_record err_rec; + uint64_t retired_page; + + /* + * If the error was generated in UMC_V2, which belongs to GPU UMCs, + * and error occurred in DramECC (Extended error code = 0) then only + * process the error, else bail out. + */ + if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) && + (XEC(m->status, 0x3f) == 0x0))) + return NOTIFY_DONE; + + /* + * If it is correctable error, return. + */ + if (mce_is_correctable(m)) + return NOTIFY_OK; + + /* + * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register. + */ + gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; + + adev = find_adev(gpu_id); + if (!adev) { + DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__, + gpu_id); + return NOTIFY_DONE; + } + + /* + * If it is uncorrectable error, then find out UMC instance and + * channel index. 
+ */ + umc_inst = GET_UMC_INST(m->ipid); + ch_inst = GET_CHAN_INDEX(m->ipid); + + dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", + umc_inst, ch_inst); + + memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); + + /* + * Translate UMC channel address to Physical address + */ + channel_index = + adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + + ch_inst]; + + retired_page = ADDR_OF_8KB_BLOCK(m->addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(m->addr); + + err_rec.address = m->addr; + err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec.ts = (uint64_t)ktime_get_real_seconds(); + err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec.cu = 0; + err_rec.mem_channel = channel_index; + err_rec.mcumc_id = umc_inst; + + err_data.err_addr = &err_rec; + err_data.err_addr_cnt = 1; + + if (amdgpu_bad_page_threshold != 0) { + amdgpu_ras_add_bad_pages(adev, err_data.err_addr, + err_data.err_addr_cnt); + amdgpu_ras_save_bad_pages(adev); + } + + return NOTIFY_OK; +} + +static struct notifier_block amdgpu_bad_page_nb = { + .notifier_call = amdgpu_bad_page_notifier, + .priority = MCE_PRIO_UC, +}; + +static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) +{ + /* + * Add the adev to the mce_adev_list. + * During mode2 reset, amdgpu device is temporarily + * removed from the mgpu_info list which can cause + * page retirement to fail. + * Use this list instead of mgpu_info to find the amdgpu + * device on which the UMC error was reported. + */ + mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; + + /* + * Register the x86 notifier only once + * with MCE subsystem. + */ + if (notifier_registered == false) { + mce_register_decode_chain(&amdgpu_bad_page_nb); + notifier_registered = true; + } +} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index eae604fd90b8..1c708122d492 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -32,7 +32,6 @@ #include "amdgpu_ras_eeprom.h" #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) -#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1) enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, @@ -49,15 +48,22 @@ enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__MP0, AMDGPU_RAS_BLOCK__MP1, AMDGPU_RAS_BLOCK__FUSE, - AMDGPU_RAS_BLOCK__MPIO, + AMDGPU_RAS_BLOCK__MCA, AMDGPU_RAS_BLOCK__LAST }; -extern const char *ras_block_string[]; +enum amdgpu_ras_mca_block { + AMDGPU_RAS_MCA_BLOCK__MP0 = 0, + AMDGPU_RAS_MCA_BLOCK__MP1, + AMDGPU_RAS_MCA_BLOCK__MPIO, + AMDGPU_RAS_MCA_BLOCK__IOHC, + + AMDGPU_RAS_MCA_BLOCK__LAST +}; -#define ras_block_str(i) (ras_block_string[i]) #define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST +#define AMDGPU_RAS_MCA_BLOCK_COUNT AMDGPU_RAS_MCA_BLOCK__LAST #define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1) enum amdgpu_ras_gfx_subblock { @@ -313,6 +319,19 @@ struct ras_common_if { char name[32]; }; +#define MAX_UMC_CHANNEL_NUM 32 + +struct ecc_info_per_ch { + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + uint64_t mca_umc_status; + uint64_t mca_umc_addr; +}; + +struct umc_ecc_info { + struct ecc_info_per_ch ecc[MAX_UMC_CHANNEL_NUM]; +}; + struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. 
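Page retirement above composes the retired address from three fields and flattens the (umc_inst, ch_inst) pair through channel_idx_tbl[]. A sketch of that composition follows; the three block macros are simplified stand-ins, since their real definitions live in headers outside this patch:

#include <stdint.h>
#include <stdio.h>

/* stand-in block macros, NOT the driver's definitions */
#define ADDR_OF_8KB_BLOCK(addr)		(((addr) & ~0xffULL) << 5)
#define ADDR_OF_256B_BLOCK(ch)		((uint64_t)(ch) << 8)
#define OFFSET_IN_256B_BLOCK(addr)	((addr) & 0xffULL)

#define CHANNEL_INST_NUM 8

/* illustrative lookup table: 2-D (umc_inst, ch_inst) flattened to 1-D */
static const uint32_t channel_idx_tbl[4 * CHANNEL_INST_NUM] = {
	[1 * CHANNEL_INST_NUM + 2] = 13,
};

int main(void)
{
	uint32_t umc_inst = 1, ch_inst = 2;
	uint32_t channel_index =
		channel_idx_tbl[umc_inst * CHANNEL_INST_NUM + ch_inst];
	uint64_t mca_addr = 0x12345;
	uint64_t retired_page = ADDR_OF_8KB_BLOCK(mca_addr) |
				ADDR_OF_256B_BLOCK(channel_index) |
				OFFSET_IN_256B_BLOCK(mca_addr);

	printf("channel_index=%u retired_page=0x%llx\n",
	       (unsigned)channel_index, (unsigned long long)retired_page);
	return 0;
}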
*/ @@ -345,10 +364,16 @@ struct amdgpu_ras { /* disable ras error count harvest in recovery */ bool disable_ras_err_cnt_harvest; + /* is poison mode supported */ + bool poison_supported; + /* RAS count errors delayed work */ struct delayed_work ras_counte_delay_work; atomic_t ras_ue_count; atomic_t ras_ce_count; + + /* record umc error info queried from smu */ + struct umc_ecc_info umc_ecc; }; struct ras_fs_data { @@ -488,8 +513,6 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, } int amdgpu_ras_recovery_init(struct amdgpu_device *adev); -int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, - unsigned int block); void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); @@ -544,6 +567,8 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) { return TA_RAS_BLOCK__MP1; case AMDGPU_RAS_BLOCK__FUSE: return TA_RAS_BLOCK__FUSE; + case AMDGPU_RAS_BLOCK__MCA: + return TA_RAS_BLOCK__MCA; default: WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); return TA_RAS_BLOCK__UMC; @@ -638,4 +663,8 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev); int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev); +const char *get_ras_block_str(struct ras_common_if *ras_block); + +bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index dc44c946a244..05117eda105b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -757,7 +757,7 @@ Out: return res; } -inline uint32_t amdgpu_ras_eeprom_max_record_count(void) +uint32_t amdgpu_ras_eeprom_max_record_count(void) { return RAS_MAX_RECORD_COUNT; } @@ -1077,6 +1077,13 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, if (res) DRM_ERROR("RAS table incorrect checksum or error:%d\n", res); + + /* Warn if we are at 90% of the threshold or above + */ + if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold) + dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d", + control->ras_num_recs, + ras->bad_page_cnt_threshold); } else if (hdr->header == RAS_TABLE_HDR_BAD && amdgpu_bad_page_threshold != 0) { res = __verify_ras_table_checksum(control); @@ -1098,11 +1105,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, res = amdgpu_ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL); } else { - *exceed_err_limit = true; - dev_err(adev->dev, - "RAS records:%d exceed threshold:%d, " - "maybe retire this GPU?", + dev_err(adev->dev, "RAS records:%d exceed threshold:%d", control->ras_num_recs, ras->bad_page_cnt_threshold); + if (amdgpu_bad_page_threshold == -2) { + dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2."); + res = 0; + } else { + *exceed_err_limit = true; + dev_err(adev->dev, + "RAS records:%d exceed threshold:%d, " + "GPU will not be initialized. 
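The 90% warning added in amdgpu_ras_eeprom_init() stays in integer arithmetic by comparing 10 * records against 9 * threshold instead of computing a percentage. A quick standalone check of the inequality:

#include <stdio.h>

static int at_or_above_90_percent(unsigned int recs, unsigned int threshold)
{
	/* 10*recs >= 9*threshold  <=>  recs/threshold >= 0.9, no floats needed */
	return 10u * recs >= 9u * threshold;
}

int main(void)
{
	printf("%d\n", at_or_above_90_percent(89, 100));	/* 0 */
	printf("%d\n", at_or_above_90_percent(90, 100));	/* 1 */
	printf("%d\n", at_or_above_90_percent(95, 100));	/* 1 */
	return 0;
}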
Replace this GPU or increase the threshold", + control->ras_num_recs, ras->bad_page_cnt_threshold); + } } } else { DRM_INFO("Creating a new EEPROM table"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index f95fc61b3021..6bb00578bfbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, const u32 num); -inline uint32_t amdgpu_ras_eeprom_max_record_count(void); +uint32_t amdgpu_ras_eeprom_max_record_count(void); void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 7b634a1517f9..ab2351ba9574 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -415,26 +415,20 @@ static const struct file_operations amdgpu_debugfs_ring_fops = { #endif -int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring) +void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; - struct dentry *ent, *root = minor->debugfs_root; + struct dentry *root = minor->debugfs_root; char name[32]; sprintf(name, "amdgpu_ring_%s", ring->name); + debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring, + &amdgpu_debugfs_ring_fops, + ring->ring_size + 12); - ent = debugfs_create_file(name, - S_IFREG | S_IRUGO, root, - ring, &amdgpu_debugfs_ring_fops); - if (!ent) - return -ENOMEM; - - i_size_write(ent->d_inode, ring->ring_size + 12); - ring->ent = ent; #endif - return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index e713d31619fe..4d380e79752c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -36,8 +36,13 @@ #define AMDGPU_MAX_VCE_RINGS 3 #define AMDGPU_MAX_UVD_ENC_RINGS 2 -#define AMDGPU_RING_PRIO_DEFAULT 1 -#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX +enum amdgpu_ring_priority_level { + AMDGPU_RING_PRIO_0, + AMDGPU_RING_PRIO_1, + AMDGPU_RING_PRIO_DEFAULT = 1, + AMDGPU_RING_PRIO_2, + AMDGPU_RING_PRIO_MAX +}; /* some special values for the owner field */ #define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul) @@ -248,10 +253,6 @@ struct amdgpu_ring { bool has_compute_vm_bug; bool no_scheduler; int hw_prio; - -#if defined(CONFIG_DEBUG_FS) - struct dentry *ent; -#endif }; #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) @@ -351,8 +352,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, int amdgpu_ring_test_helper(struct amdgpu_ring *ring); -int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, - struct amdgpu_ring *ring); -void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); - +void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, + struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index b7d861ed5284..e9b45089a28a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -32,37 +32,9 @@ #include "amdgpu_sched.h" #include "amdgpu_vm.h" -int amdgpu_to_sched_priority(int amdgpu_priority, - enum 
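The new amdgpu_ring_priority_level enum relies on C enumeration rules: AMDGPU_RING_PRIO_DEFAULT is an explicit alias of value 1, so AMDGPU_RING_PRIO_2 continues at 2 and AMDGPU_RING_PRIO_MAX lands at 3. This can be confirmed in a few lines:

#include <stdio.h>

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,	/* alias of PRIO_1 */
	AMDGPU_RING_PRIO_2,		/* resumes counting at 2 */
	AMDGPU_RING_PRIO_MAX
};

int main(void)
{
	printf("PRIO_0=%d PRIO_1=%d DEFAULT=%d PRIO_2=%d MAX=%d\n",
	       AMDGPU_RING_PRIO_0, AMDGPU_RING_PRIO_1,
	       AMDGPU_RING_PRIO_DEFAULT, AMDGPU_RING_PRIO_2,
	       AMDGPU_RING_PRIO_MAX);	/* 0 1 1 2 3 */
	return 0;
}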
drm_sched_priority *prio) -{ - switch (amdgpu_priority) { - case AMDGPU_CTX_PRIORITY_VERY_HIGH: - *prio = DRM_SCHED_PRIORITY_HIGH; - break; - case AMDGPU_CTX_PRIORITY_HIGH: - *prio = DRM_SCHED_PRIORITY_HIGH; - break; - case AMDGPU_CTX_PRIORITY_NORMAL: - *prio = DRM_SCHED_PRIORITY_NORMAL; - break; - case AMDGPU_CTX_PRIORITY_LOW: - case AMDGPU_CTX_PRIORITY_VERY_LOW: - *prio = DRM_SCHED_PRIORITY_MIN; - break; - case AMDGPU_CTX_PRIORITY_UNSET: - *prio = DRM_SCHED_PRIORITY_UNSET; - break; - default: - WARN(1, "Invalid context priority %d\n", amdgpu_priority); - return -EINVAL; - } - - return 0; -} - static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, int fd, - enum drm_sched_priority priority) + int32_t priority) { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; @@ -89,7 +61,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, int fd, unsigned ctx_id, - enum drm_sched_priority priority) + int32_t priority) { struct fd f = fdget(fd); struct amdgpu_fpriv *fpriv; @@ -124,7 +96,6 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_sched *args = data; struct amdgpu_device *adev = drm_to_adev(dev); - enum drm_sched_priority priority; int r; /* First check the op, then the op's argument. @@ -138,21 +109,22 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - r = amdgpu_to_sched_priority(args->in.priority, &priority); - if (r) - return r; + if (!amdgpu_ctx_priority_is_valid(args->in.priority)) { + WARN(1, "Invalid context priority %d\n", args->in.priority); + return -EINVAL; + } switch (args->in.op) { case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE: r = amdgpu_sched_process_priority_override(adev, args->in.fd, - priority); + args->in.priority); break; case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE: r = amdgpu_sched_context_priority_override(adev, args->in.fd, args->in.ctx_id, - priority); + args->in.priority); break; default: /* Impossible. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 862eb3c1c4c5..f7d8487799b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -252,41 +252,25 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct dma_resv *resv, enum amdgpu_sync_mode mode, void *owner) { - struct dma_resv_list *flist; + struct dma_resv_iter cursor; struct dma_fence *f; - unsigned i; - int r = 0; + int r; if (resv == NULL) return -EINVAL; - /* always sync to the exclusive fence */ - f = dma_resv_excl_fence(resv); - dma_fence_chain_for_each(f, f) { - struct dma_fence_chain *chain = to_dma_fence_chain(f); - - if (amdgpu_sync_test_fence(adev, mode, owner, chain ? - chain->fence : f)) { - r = amdgpu_sync_fence(sync, f); - dma_fence_put(f); - if (r) - return r; - break; - } - } - - flist = dma_resv_shared_list(resv); - if (!flist) - return 0; - - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(resv)); - - if (amdgpu_sync_test_fence(adev, mode, owner, f)) { - r = amdgpu_sync_fence(sync, f); - if (r) - return r; + dma_resv_for_each_fence(&cursor, resv, true, f) { + dma_fence_chain_for_each(f, f) { + struct dma_fence_chain *chain = to_dma_fence_chain(f); + + if (amdgpu_sync_test_fence(adev, mode, owner, chain ? 
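With this change the sched ioctl validates the raw UAPI priority instead of translating it to a drm_sched_priority first. amdgpu_ctx_priority_is_valid() itself is not shown in this hunk; one plausible standalone shape, accepting exactly the discrete AMDGPU_CTX_PRIORITY_* levels (values as defined in the amdgpu UAPI header, quoted here from memory):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_CTX_PRIORITY_UNSET	-2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW	-1023
#define AMDGPU_CTX_PRIORITY_LOW		-512
#define AMDGPU_CTX_PRIORITY_NORMAL	0
#define AMDGPU_CTX_PRIORITY_HIGH	512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH	1023

static bool ctx_priority_is_valid(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
		return false;	/* anything in between is rejected */
	}
}

int main(void)
{
	printf("%d %d\n", ctx_priority_is_valid(512), ctx_priority_is_valid(7));
	return 0;
}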
+ chain->fence : f)) { + r = amdgpu_sync_fence(sync, f); + dma_fence_put(f); + if (r) + return r; + break; + } } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e8d70b6e6737..fb0d8bffdce2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -41,6 +41,7 @@ #include <linux/swiotlb.h> #include <linux/dma-buf.h> #include <linux/sizes.h> +#include <linux/module.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> @@ -59,6 +60,8 @@ #include "amdgpu_res_cursor.h" #include "bif/bif_4_1_d.h" +MODULE_IMPORT_NS(DMA_BUF); + #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, @@ -113,17 +116,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, abo = ttm_to_amdgpu_bo(bo); if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) { - struct dma_fence *fence; - struct dma_resv *resv = &bo->base._resv; - - rcu_read_lock(); - fence = rcu_dereference(resv->fence_excl); - if (fence && !fence->ops->signaled) - dma_fence_enable_sw_signaling(fence); - placement->num_placement = 0; placement->num_busy_placement = 0; - rcu_read_unlock(); return; } @@ -515,6 +509,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, goto out; } + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. + */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + } + if (adev->mman.buffer_funcs_enabled) { if (((old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) || @@ -545,15 +548,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } - if (bo->type == ttm_bo_type_device && - new_mem->mem_type == TTM_PL_VRAM && - old_mem->mem_type != TTM_PL_VRAM) { - /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU - * accesses the BO after it's moved. 
- */ - abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - } - out: /* update statistics */ atomic64_add(bo->base.size, &adev->num_bytes_moved); @@ -696,6 +690,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) true, NULL); out_unlock: mmap_read_unlock(mm); + if (r) + pr_debug("failed %d to get user pages 0x%lx\n", r, start); + mmput(mm); return r; @@ -916,11 +913,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, ttm->num_pages, bo_mem, ttm); } - if (bo_mem->mem_type == AMDGPU_PL_GDS || - bo_mem->mem_type == AMDGPU_PL_GWS || - bo_mem->mem_type == AMDGPU_PL_OA) - return -EINVAL; - if (bo_mem->mem_type != TTM_PL_TT || !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { gtt->offset = AMDGPU_BO_INVALID_OFFSET; @@ -1235,7 +1227,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) * */ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, - unsigned long end) + unsigned long end, unsigned long *userptr) { struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned long size; @@ -1250,6 +1242,8 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, if (gtt->userptr > end || gtt->userptr + size <= start) return false; + if (userptr) + *userptr = gtt->userptr; return true; } @@ -1345,10 +1339,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { unsigned long num_pages = bo->resource->num_pages; + struct dma_resv_iter resv_cursor; struct amdgpu_res_cursor cursor; - struct dma_resv_list *flist; struct dma_fence *f; - int i; /* Swapout? */ if (bo->resource->mem_type == TTM_PL_SYSTEM) @@ -1362,14 +1355,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_shared_list(bo->base.resv); - if (flist) { - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(bo->base.resv)); - if (amdkfd_fence_check_mm(f, current->mm)) - return false; - } + dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) { + if (amdkfd_fence_check_mm(f, current->mm)) + return false; } switch (bo->resource->mem_type) { @@ -2049,6 +2037,36 @@ error_free: return r; } +/** + * amdgpu_ttm_evict_resources - evict memory buffers + * @adev: amdgpu device object + * @mem_type: evicted BO's memory type + * + * Evicts all @mem_type buffers on the lru list of the memory type. + * + * Returns: + * 0 for success or a negative error code on failure. 
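The userptr check in amdgpu_ttm_tt_affect_userptr() is a standard interval-overlap test over the half-open range [userptr, userptr + size): there is no overlap exactly when one range ends before the other starts. Extracted as a standalone predicate:

#include <stdbool.h>
#include <stdio.h>

static bool ranges_overlap(unsigned long ustart, unsigned long usize,
			   unsigned long start, unsigned long end)
{
	/* mirrors: userptr > end || userptr + size <= start -> no overlap */
	if (ustart > end || ustart + usize <= start)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", ranges_overlap(0x1000, 0x1000, 0x1800, 0x2000)); /* 1 */
	printf("%d\n", ranges_overlap(0x1000, 0x1000, 0x3000, 0x4000)); /* 0 */
	return 0;
}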
+ */ +int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) +{ + struct ttm_resource_manager *man; + + switch (mem_type) { + case TTM_PL_VRAM: + case TTM_PL_TT: + case AMDGPU_PL_GWS: + case AMDGPU_PL_GDS: + case AMDGPU_PL_OA: + man = ttm_manager_type(&adev->mman.bdev, mem_type); + break; + default: + DRM_ERROR("Trying to evict invalid memory type\n"); + return -EINVAL; + } + + return ttm_resource_manager_evict_all(&adev->mman.bdev, man); +} + #if defined(CONFIG_DEBUG_FS) static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 3205fd520060..7346ecff4438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -182,7 +182,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, - unsigned long end); + unsigned long end, unsigned long *userptr); bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated); bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm); @@ -190,6 +190,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem); uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_resource *mem); +int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type); void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index abd8469380e5..ca3350502618 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -416,10 +416,11 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) else return AMDGPU_FW_LOAD_PSP; default: - DRM_ERROR("Unknown firmware load type\n"); + if (!load_type) + return AMDGPU_FW_LOAD_DIRECT; + else + return AMDGPU_FW_LOAD_PSP; } - - return AMDGPU_FW_LOAD_DIRECT; } const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id) @@ -508,7 +509,7 @@ static ssize_t show_##name(struct device *dev, \ struct drm_device *ddev = dev_get_drvdata(dev); \ struct amdgpu_device *adev = drm_to_adev(ddev); \ \ - return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \ + return sysfs_emit(buf, "0x%08x\n", adev->field); \ } \ static DEVICE_ATTR(name, mode, show_##name, NULL) @@ -525,9 +526,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version); FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version); FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version); FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version); -FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version); -FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version); -FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version); +FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version); +FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version); +FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version); FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version); FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version); FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version); @@ -572,6 +573,7 @@ static int 
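Since amdgpu_ttm_evict_resources() wraps both the manager lookup and the ttm_resource_manager_evict_all() call, a caller only has to pick the placement. A hypothetical call site, assumed rather than taken from this patch:

/* sketch only: flush all VRAM buffers, e.g. on a suspend-like path */
static int example_evict_vram(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
	if (r)
		dev_err(adev->dev, "failed to evict VRAM: %d\n", r);
	return r;
}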
amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL; const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL; const struct mes_firmware_header_v1_0 *mes_hdr = NULL; + u8 *ucode_addr; if (NULL == ucode->fw) return 0; @@ -588,94 +590,83 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data; mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP || - (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES_DATA && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM && - ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV && - ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 || - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - - le32_to_cpu(cp_hdr->jt_size) * 4; - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) { - ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4; - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes) + - le32_to_cpu(cp_hdr->jt_offset) * 4), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) { - ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + switch (ucode->ucode_id) { + case AMDGPU_UCODE_ID_CP_MEC1: + case AMDGPU_UCODE_ID_CP_MEC2: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - + le32_to_cpu(cp_hdr->jt_size) * 4; + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_CP_MEC1_JT: + case AMDGPU_UCODE_ID_CP_MEC2_JT: + ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4; + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes) + + le32_to_cpu(cp_hdr->jt_offset) * 4; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_cntl; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_gpm; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: + ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes; + ucode_addr = adev->gfx.rlc.save_restore_list_srm; + break; + case AMDGPU_UCODE_ID_RLC_IRAM: + ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes; + ucode_addr 
= adev->gfx.rlc.rlc_iram_ucode; + break; + case AMDGPU_UCODE_ID_RLC_DRAM: + ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes; + ucode_addr = adev->gfx.rlc.rlc_dram_ucode; + break; + case AMDGPU_UCODE_ID_CP_MES: + ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(mes_hdr->mes_ucode_offset_bytes); + break; + case AMDGPU_UCODE_ID_CP_MES_DATA: + ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCU_ERAM: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(dmcu_hdr->intv_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) { - ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); - - memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes) + - le32_to_cpu(dmcu_hdr->intv_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) { - ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes); - memcpy(ucode->kaddr, - (void *)((uint8_t *)ucode->fw->data + - le32_to_cpu(header->ucode_array_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { - ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) { - ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) { - ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes; - memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode, - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) { - ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes); - memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data + - le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)), - ucode->ucode_size); - } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA) { - ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes); - memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data + - le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)), - ucode->ucode_size); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCU_INTV: + ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes) + + le32_to_cpu(dmcu_hdr->intv_offset_bytes); + break; + case AMDGPU_UCODE_ID_DMCUB: + ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + 
break; + default: + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); + break; + } + } else { + ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); + ucode_addr = (u8 *)ucode->fw->data + + le32_to_cpu(header->ucode_array_offset_bytes); } + memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index a90029ee9733..6e4bea012ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -94,30 +94,58 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret = 0; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); - - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address && - adev->umc.max_ras_err_cnt_per_query) { - err_data->err_addr = - kcalloc(adev->umc.max_ras_err_cnt_per_query, - sizeof(struct eeprom_table_record), GFP_KERNEL); - - /* still call query_ras_error_address to clear error status - * even NOMEM error is encountered - */ - if(!err_data->err_addr) - dev_warn(adev->dev, "Failed to alloc memory for " - "umc error address record!\n"); - - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); + ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc)); + if (ret == -EOPNOTSUPP) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if(!err_data->err_addr) + dev_warn(adev->dev, "Failed to alloc memory for " + "umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); + } + } else if (!ret) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_count) + adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if(!err_data->err_addr) + dev_warn(adev->dev, "Failed to alloc memory for " + "umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status); + } } /* only uncorrectable error needs gpu reset */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index e5a75fb788dd..9e40bade0a68 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -48,6 +48,11 @@ struct amdgpu_umc_ras_funcs { void *ras_error_status); void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); + bool (*query_ras_poison_mode)(struct amdgpu_device *adev); + void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev, + void *ras_error_status); }; struct amdgpu_umc_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h new file mode 100644 index 000000000000..919d9d401750 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h @@ -0,0 +1,51 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/ioctl.h> + +/* + * MMIO debugfs IOCTL structure + */ +struct amdgpu_debugfs_regs2_iocdata { + __u32 use_srbm, use_grbm, pg_lock; + struct { + __u32 se, sh, instance; + } grbm; + struct { + __u32 me, pipe, queue, vmid; + } srbm; +}; + +/* + * MMIO debugfs state data (per file* handle) + */ +struct amdgpu_debugfs_regs2_data { + struct amdgpu_device *adev; + struct mutex lock; + struct amdgpu_debugfs_regs2_iocdata id; +}; + +enum AMDGPU_DEBUGFS_REGS2_CMDS { + AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0, +}; + +#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d451c359606a..6f8de11a17f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -134,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12); MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); +static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo); + +static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev, + uint32_t size, + struct amdgpu_bo **bo_ptr) +{ + struct ttm_operation_ctx ctx = { true, false }; + struct amdgpu_bo *bo = NULL; + void *addr; + int r; + + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &bo, NULL, &addr); + if (r) + return r; + + if (adev->uvd.address_64_bit) + goto succ; + + amdgpu_bo_kunmap(bo); + amdgpu_bo_unpin(bo); + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + amdgpu_uvd_force_into_uvd_segment(bo); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (r) + goto err; + r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM); + if (r) + goto err_pin; + r = amdgpu_bo_kmap(bo, &addr); + if (r) + goto err_kmap; +succ: + amdgpu_bo_unreserve(bo); + *bo_ptr = bo; + return 0; +err_kmap: + amdgpu_bo_unpin(bo); +err_pin: +err: + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + return r; +} int amdgpu_uvd_sw_init(struct amdgpu_device *adev) { @@ -302,6 +347,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; + r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo); + if (r) + return r; + switch (adev->asic_type) { case CHIP_TONGA: adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; @@ -324,6 +373,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { + void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo); int i, j; drm_sched_entity_destroy(&adev->uvd.entity); @@ -342,6 +392,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); } + amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr); release_firmware(adev->uvd.fw); return 0; @@ -403,7 +454,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.inst[j].saved_bo) return -ENOMEM; - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { /* re-write 0 since err_event_athub will corrupt VCPU buffer */ if (in_ras_intr) memset(adev->uvd.inst[j].saved_bo, 0, size); @@ -436,7 +487,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.inst[i].cpu_addr; if (adev->uvd.inst[i].saved_bo != NULL) { - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { 
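A userspace consumer of the new regs2 interface would fill the ioc data and issue the SET_STATE ioctl before reading registers through the file. The struct layout and ioctl number below come from the header above (with __u32 swapped for unsigned int so it compiles standalone); the debugfs path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct amdgpu_debugfs_regs2_iocdata {
	unsigned int use_srbm, use_grbm, pg_lock;
	struct { unsigned int se, sh, instance; } grbm;
	struct { unsigned int me, pipe, queue, vmid; } srbm;
};

#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE \
	_IOWR(0x20, 0, struct amdgpu_debugfs_regs2_iocdata)

int main(void)
{
	struct amdgpu_debugfs_regs2_iocdata id;
	/* assumed debugfs location for GPU 0 */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&id, 0, sizeof(id));
	id.use_grbm = 1;	/* select GRBM bank 0/0/0 */
	if (ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id))
		perror("ioctl");
	close(fd);
	return 0;
}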
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); drm_dev_exit(idx); } @@ -449,7 +500,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, le32_to_cpu(hdr->ucode_size_bytes)); drm_dev_exit(idx); @@ -1080,23 +1131,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, unsigned offset_idx = 0; unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; - amdgpu_bo_kunmap(bo); - amdgpu_bo_unpin(bo); - - if (!ring->adev->uvd.address_64_bit) { - struct ttm_operation_ctx ctx = { true, false }; - - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); - amdgpu_uvd_force_into_uvd_segment(bo); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (r) - goto err; - } - r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED, &job); if (r) - goto err; + return r; if (adev->asic_type >= CHIP_VEGA10) { offset_idx = 1 + ring->me; @@ -1147,9 +1185,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err_free; } + amdgpu_bo_reserve(bo, true); amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); if (fence) *fence = dma_fence_get(f); @@ -1159,10 +1197,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, err_free: amdgpu_job_free(job); - -err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -1173,16 +1207,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = adev->uvd.ib_bo; uint32_t *msg; - int r, i; - - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &bo, NULL, (void **)&msg); - if (r) - return r; + int i; + msg = amdgpu_bo_kptr(bo); /* stitch together an UVD create msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000000); @@ -1199,6 +1228,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, msg[i] = cpu_to_le32(0x0); return amdgpu_uvd_send_msg(ring, bo, true, fence); + } int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, @@ -1209,12 +1239,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, uint32_t *msg; int r, i; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, - &bo, NULL, (void **)&msg); - if (r) - return r; + if (direct) { + bo = adev->uvd.ib_bo; + } else { + r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo); + if (r) + return r; + } + msg = amdgpu_bo_kptr(bo); /* stitch together an UVD destroy msg */ msg[0] = cpu_to_le32(0x00000de4); msg[1] = cpu_to_le32(0x00000002); @@ -1223,7 +1256,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = 4; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_uvd_send_msg(ring, bo, direct, fence); + r = amdgpu_uvd_send_msg(ring, bo, direct, fence); + + if (!direct) + amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + + return r; } static void amdgpu_uvd_idle_work_handler(struct work_struct *work) @@ -1298,10 +1336,17 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) struct dma_fence *fence; long r; - r 
= amdgpu_uvd_get_create_msg(ring, 1, NULL); + r = amdgpu_uvd_get_create_msg(ring, 1, &fence); if (r) goto error; + r = dma_fence_wait_timeout(fence, false, timeout); + dma_fence_put(fence); + if (r == 0) + r = -ETIMEDOUT; + if (r < 0) + goto error; + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index edbb8194ee81..76ac9699885d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -68,6 +68,7 @@ struct amdgpu_uvd { /* store image width to adjust nb memory state */ unsigned decode_image_width; uint32_t keyselect; + struct amdgpu_bo *ib_bo; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 8e8dee9fac9f..688bef1649b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -82,7 +82,6 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_vce_idle_work_handler(struct work_struct *work); static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, struct dma_fence **fence); static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct dma_fence **fence); @@ -314,7 +313,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { memcpy_toio(cpu_addr, adev->vce.fw->data + offset, adev->vce.fw->size - offset); drm_dev_exit(idx); @@ -441,12 +440,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * Open up a stream for HW test */ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; struct amdgpu_ib *ib; + struct amdgpu_ib ib_msg; struct dma_fence *f = NULL; uint64_t addr; int i, r; @@ -456,9 +455,17 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, if (r) return r; - ib = &job->ibs[0]; + memset(&ib_msg, 0, sizeof(ib_msg)); + /* only one gpu page is needed, alloc +1 page to make addr aligned. 
*/ + r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + &ib_msg); + if (r) + goto err; - addr = amdgpu_bo_gpu_offset(bo); + ib = &job->ibs[0]; + /* let addr point to page boundary */ + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr); /* stitch together an VCE create msg */ ib->length_dw = 0; @@ -498,6 +505,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; r = amdgpu_job_submit_direct(job, ring, &f); + amdgpu_ib_free(ring->adev, &ib_msg, f); if (r) goto err; @@ -1134,20 +1142,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; long r; /* skip vce ring1/2 ib test for now, since it's not reliable */ if (ring != &ring->adev->vce.ring[0]) return 0; - r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - - r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL); + r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) goto error; @@ -1163,7 +1164,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, NULL); return r; } + +enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring) +{ + switch(ring) { + case 0: + return AMDGPU_RING_PRIO_0; + case 1: + return AMDGPU_RING_PRIO_1; + case 2: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index d6d83a3ec803..be4a6e773c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -71,5 +71,6 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring); unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring); +enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 008a308a4eca..585961c2f5f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -86,8 +86,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; i++) atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): if (adev->apu_flags & AMD_APU_IS_RAVEN2) fw_name = FIRMWARE_RAVEN2; else if (adev->apu_flags & AMD_APU_IS_PICASSO) @@ -95,13 +96,13 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) else fw_name = FIRMWARE_RAVEN; break; - case CHIP_ARCTURUS: + case IP_VERSION(2, 5, 0): fw_name = FIRMWARE_ARCTURUS; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_RENOIR: + case IP_VERSION(2, 2, 0): if (adev->apu_flags & AMD_APU_IS_RENOIR) fw_name = FIRMWARE_RENOIR; else @@ -111,58 +112,54 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_ALDEBARAN: + case IP_VERSION(2, 6, 0): fw_name = FIRMWARE_ALDEBARAN; if ((adev->firmware.load_type == 
AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI10: + case IP_VERSION(2, 0, 0): fw_name = FIRMWARE_NAVI10; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI14: - fw_name = FIRMWARE_NAVI14; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case CHIP_NAVI12: - fw_name = FIRMWARE_NAVI12; - if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && - (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) - adev->vcn.indirect_sram = true; - break; - case CHIP_SIENNA_CICHLID: - fw_name = FIRMWARE_SIENNA_CICHLID; + case IP_VERSION(2, 0, 2): + if (adev->asic_type == CHIP_NAVI12) + fw_name = FIRMWARE_NAVI12; + else + fw_name = FIRMWARE_NAVI14; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVY_FLOUNDER: - fw_name = FIRMWARE_NAVY_FLOUNDER; + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + fw_name = FIRMWARE_SIENNA_CICHLID; + else + fw_name = FIRMWARE_NAVY_FLOUNDER; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_VANGOGH: + case IP_VERSION(3, 0, 2): fw_name = FIRMWARE_VANGOGH; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(3, 0, 16): fw_name = FIRMWARE_DIMGREY_CAVEFISH; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(3, 0, 33): fw_name = FIRMWARE_BEIGE_GOBY; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_YELLOW_CARP: + case IP_VERSION(3, 1, 1): fw_name = FIRMWARE_YELLOW_CARP; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) @@ -330,7 +327,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) if (!adev->vcn.inst[i].saved_bo) return -ENOMEM; - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); drm_dev_exit(idx); } @@ -354,7 +351,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) ptr = adev->vcn.inst[i].cpu_addr; if (adev->vcn.inst[i].saved_bo != NULL) { - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); drm_dev_exit(idx); } @@ -367,7 +364,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vcn.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, le32_to_cpu(hdr->ucode_size_bytes)); drm_dev_exit(idx); @@ -541,15 +538,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; struct 
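The VCN firmware selection above now keys on adev->ip_versions[UVD_HWIP][0] rather than asic_type, so a single case can cover several boards sharing one IP revision. The IP_VERSION encoding below (major/minor/revision packed at bit 16/8/0) matches the upstream macro to the best of my knowledge, but treat it as an assumption:

#include <stdio.h>

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static const char *vcn_fw_for(unsigned int ver)
{
	switch (ver) {
	case IP_VERSION(2, 5, 0):
		return "arcturus";
	case IP_VERSION(3, 0, 2):
		return "vangogh";
	case IP_VERSION(3, 1, 1):
		return "yellow_carp";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", vcn_fw_for(IP_VERSION(3, 0, 2)));	/* vangogh */
	return 0;
}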
amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr; - void *msg = NULL; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; r = amdgpu_job_alloc_with_ib(adev, 64, @@ -558,8 +554,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, goto err; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); - msg = amdgpu_bo_kptr(bo); ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); ib->ptr[1] = addr; ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); @@ -576,9 +570,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, if (r) goto err_free; - amdgpu_bo_fence(bo, f, false); - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + amdgpu_ib_free(adev, ib_msg, f); if (fence) *fence = dma_fence_get(f); @@ -588,27 +580,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, err_free: amdgpu_job_free(job); - err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); + amdgpu_ib_free(adev, ib_msg, f); return r; } static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo **bo) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; uint32_t *msg; int r, i; - *bo = NULL; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000038); msg[2] = cpu_to_le32(0x00000001); @@ -630,19 +621,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand } static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo **bo) + struct amdgpu_ib *ib) { struct amdgpu_device *adev = ring->adev; uint32_t *msg; int r, i; - *bo = NULL; - r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - bo, NULL, (void **)&msg); + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2, + AMDGPU_IB_POOL_DIRECT, + ib); if (r) return r; + msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr); msg[0] = cpu_to_le32(0x00000028); msg[1] = cpu_to_le32(0x00000018); msg[2] = cpu_to_le32(0x00000000); @@ -658,21 +650,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo; + struct amdgpu_ib ib; long r; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_send_msg(ring, bo, NULL); + r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_send_msg(ring, bo, &fence); + r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence); if (r) goto error; @@ -688,8 +680,8 @@ error: } static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct dma_fence **fence) + struct amdgpu_ib *ib_msg, + struct dma_fence **fence) { struct amdgpu_vcn_decode_buffer *decode_buffer = NULL; const unsigned int ib_size_dw = 64; @@ -697,7 +689,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, struct dma_fence *f = NULL; struct amdgpu_job *job; struct 
amdgpu_ib *ib; - uint64_t addr; + uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, @@ -706,7 +698,6 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, goto err; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); ib->length_dw = 0; ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8; @@ -726,9 +717,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, if (r) goto err_free; - amdgpu_bo_fence(bo, f, false); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); if (fence) *fence = dma_fence_get(f); @@ -738,31 +727,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, err_free: amdgpu_job_free(job); - err: - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_ib_free(adev, ib_msg, f); return r; } int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo; + struct amdgpu_ib ib; long r; - r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL); + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo); + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib); if (r) goto error; - r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence); + r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence); if (r) goto error; @@ -809,7 +796,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { const unsigned ib_size_dw = 16; @@ -825,7 +812,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; @@ -863,7 +850,7 @@ err: } static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, + struct amdgpu_ib *ib_msg, struct dma_fence **fence) { const unsigned ib_size_dw = 16; @@ -879,7 +866,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han return r; ib = &job->ibs[0]; - addr = amdgpu_bo_gpu_offset(bo); + addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); ib->length_dw = 0; ib->ptr[ib->length_dw++] = 0x00000018; @@ -918,21 +905,23 @@ err: int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { + struct amdgpu_device *adev = ring->adev; struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_ib ib; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_IB_POOL_DIRECT, + &ib); if (r) return r; - r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL); + r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL); if (r) goto error; - r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence); + r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence); if (r) goto error; @@ -943,9 +932,49 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = 0; error: + amdgpu_ib_free(adev, &ib, fence); dma_fence_put(fence); - amdgpu_bo_unreserve(bo); - 
amdgpu_bo_free_kernel(&bo, NULL, NULL); return r; } + +enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring) +{ + switch(ring) { + case 0: + return AMDGPU_RING_PRIO_0; + case 1: + return AMDGPU_RING_PRIO_1; + case 2: + return AMDGPU_RING_PRIO_2; + default: + return AMDGPU_RING_PRIO_0; + } +} + +void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev) +{ + int i; + unsigned int idx; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + const struct common_firmware_header *hdr; + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + /* currently only supports 2 FW instances */ + if (i >= 2) { + dev_info(adev->dev, "More than 2 VCN FW instances!\n"); + break; + } + idx = AMDGPU_UCODE_ID_VCN + i; + adev->firmware.ucode[idx].ucode_id = idx; + adev->firmware.ucode[idx].fw = adev->vcn.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + } + dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index d74c62b49795..bfa27ea94804 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -308,4 +308,8 @@ int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout); +enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring); + +void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index ca058fbcccd4..3fc49823f527 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -283,17 +283,15 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL); if (!*data) - return -ENOMEM; + goto data_failure; bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL); - bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + if (!bps) + goto bps_failure; - if (!bps || !bps_bo) { - kfree(bps); - kfree(bps_bo); - kfree(*data); - return -ENOMEM; - } + bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + if (!bps_bo) + goto bps_bo_failure; (*data)->bps = bps; (*data)->bps_bo = bps_bo; @@ -303,6 +301,13 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) virt->ras_init_done = true; return 0; + +bps_bo_failure: + kfree(bps); +bps_failure: + kfree(*data); +data_failure: + return -ENOMEM; } static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev) @@ -532,9 +537,12 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev) POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version); - POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version); - POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version); - POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);
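
The amdgpu_virt.c hunk above replaces the combined allocation check with the kernel's usual goto-unwind ladder: each label frees exactly what was allocated before the failing step, so no branch ever frees something it does not own. A generic sketch of the idiom (variable names are placeholders):

        a = kmalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                goto a_failure;

        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (!b)
                goto b_failure;

        return 0;

b_failure:
        kfree(a);       /* unwind in reverse allocation order */
a_failure:
        return -ENOMEM;
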
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, + adev->psp.asd_context.bin_desc.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, + adev->psp.ras_context.context.bin_desc.fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, + adev->psp.xgmi_context.context.bin_desc.fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version); @@ -581,6 +589,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->encode_usage = 0; vf2pf_info->decode_usage = 0; + vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr; vf2pf_info->checksum = amd_sriov_msg_checksum( vf2pf_info, vf2pf_info->header.size, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ce982afeff91..ac9a8cd21c4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle) int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) - if (adev->mode_info.crtcs[i]) - hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); + if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function) + hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer); kfree(adev->mode_info.bios_hardcoded_edid); kfree(adev->amdgpu_vkms_output); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6b15cad78de9..a96ae4c0e040 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -800,7 +800,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo = &vmbo->bo; unsigned entries, ats_entries; uint64_t addr; - int r; + int r, idx; /* Figure out our place in the hierarchy */ if (ancestor->parent) { @@ -845,9 +845,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, return r; } + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return -ENODEV; + r = vm->update_funcs->map_table(vmbo); if (r) - return r; + goto exit; memset(&params, 0, sizeof(params)); params.adev = adev; @@ -856,7 +859,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); if (r) - return r; + goto exit; addr = 0; if (ats_entries) { @@ -872,7 +875,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries, value, flags); if (r) - return r; + goto exit; addr += ats_entries * 8; } @@ -895,10 +898,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, r = vm->update_funcs->update(&params, vmbo, addr, 0, entries, value, flags); if (r) - return r; + goto exit; } - return vm->update_funcs->commit(&params, NULL); + r = vm->update_funcs->commit(&params, NULL); +exit: + drm_dev_exit(idx); + return r; } /** @@ -1384,11 +1390,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate) { struct amdgpu_vm_update_params params; - int r; + int r, idx; if (list_empty(&vm->relocated)) return 0; + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return -ENODEV; + memset(&params, 0, sizeof(params)); params.adev = adev; params.vm = vm; params.immediate = immediate; r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); if (r) - return r; + goto exit; while (!list_empty(&vm->relocated)) { struct amdgpu_vm_bo_base *entry; @@ -1414,10 +1423,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, r = vm->update_funcs->commit(&params, &vm->last_update); if (r) goto error; + drm_dev_exit(idx); return 0; error: amdgpu_vm_invalidate_pds(adev, vm); +exit: + drm_dev_exit(idx); return r; } @@ -1706,7 +1718,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, enum amdgpu_sync_mode sync_mode; int r, idx; - if (!drm_dev_enter(&adev->ddev, &idx)) + if (!drm_dev_enter(adev_to_drm(adev), &idx)) return -ENODEV; memset(&params, 0, sizeof(params)); @@ -2090,30 +2102,14 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct dma_resv *resv = vm->root.bo->tbo.base.resv; - struct dma_fence *excl, **shared; - unsigned i, shared_count; - int r; + struct dma_resv_iter cursor; + struct dma_fence *fence; - r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); - if (r) { - /* Not enough memory to grab the fence list, as last resort - * block for all the fences to complete. - */ - dma_resv_wait_timeout(resv, true, false, - MAX_SCHEDULE_TIMEOUT); - return; - } - - /* Add a callback for each fence in the reservation object */ - amdgpu_vm_prt_get(adev); - amdgpu_vm_add_prt_cb(adev, excl); - - for (i = 0; i < shared_count; ++i) { + dma_resv_for_each_fence(&cursor, resv, true, fence) { + /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); - amdgpu_vm_add_prt_cb(adev, shared[i]); + amdgpu_vm_add_prt_cb(adev, fence); } - - kfree(shared); } /**
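
amdgpu_vm_prt_fini() above switches from snapshotting fences with dma_resv_get_fences() to walking them in place with a dma_resv_iter cursor, which eliminates the allocation and its last-resort wait-for-everything fallback. A minimal sketch of the iterator, matching the call shape in this hunk (handle_fence() is a placeholder):

        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        /* 'true' also visits shared fences, as in the hunk above; each
         * fence pointer is only valid inside the loop body */
        dma_resv_for_each_fence(&cursor, resv, true, fence)
                handle_fence(fence);
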
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 978ac927ac11..567df2db23ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) "%s", "xgmi_hive_info"); if (ret) { dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); + kobject_put(&hive->kobj); kfree(hive); hive = NULL; goto pro_end; } @@ -806,9 +807,9 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, xgmi23_pcs_err_status_reg_aldebaran[i]); - for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) + for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, - xgmi23_pcs_err_status_reg_aldebaran[i]); + xgmi3x16_pcs_err_status_reg_aldebaran[i]); for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) pcs_clear_status(adev, walf_pcs_err_status_reg_aldebaran[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index a434c71fde8e..7326b6c1b71c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -204,8 +204,10 @@ struct amd_sriov_msg_pf2vf_info { } mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST]; /* UUID info */ struct amd_sriov_msg_uuid_info uuid_info; + /* pcie atomic Ops info */ + uint32_t pcie_atomic_ops_enabled_flags; /* reserved */ - uint32_t reserved[256 - 47]; + uint32_t reserved[256 - 48]; }; struct amd_sriov_msg_vf2pf_info_header { @@ -259,9 +261,10 @@ struct amd_sriov_msg_vf2pf_info { uint8_t id; uint32_t version; } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE]; + uint64_t dummy_page_addr; /* reserved */ - uint32_t reserved[256-68]; + 
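
The amdgpu_xgmi.c hunk above plugs a reference leak with a standard kobject rule: kobject_init_and_add() takes a reference even when it fails, so the error path must drop it with kobject_put() before releasing the container. Sketch of the idiom (the ktype, parent, and error label are placeholders):

        r = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "%s", "name");
        if (r) {
                kobject_put(&obj->kobj);        /* reference held even on failure */
                goto fail;
        }
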
uint32_t reserved[256-70]; }; /* mailbox message send from guest to host */ diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index 3ac505d954c4..ab6a07e5e8c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -77,10 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[ATHUB_HWIP][0]) { + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): athub_v2_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); athub_v2_0_update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c index c12c2900732b..2edefd10e56c 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c @@ -70,11 +70,10 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[ATHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE); break; diff --git a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c b/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c deleted file mode 100644 index 608a113ce354..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
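
The athub changes above begin a conversion that repeats through the gfx files below: hardware paths are selected by the per-block IP version reported by discovery rather than by ASIC name, so one case serves every product shipping that block revision. A condensed sketch of the new dispatch (the helper call is a placeholder):

        switch (adev->ip_versions[ATHUB_HWIP][0]) {
        case IP_VERSION(2, 0, 0):       /* previously matched via CHIP_NAVI10 */
        case IP_VERSION(2, 0, 2):
                update_clock_gating(adev);      /* placeholder */
                break;
        default:
                break;
        }
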
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "beige_goby_ip_offset.h" - -int beige_goby_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialize the block needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN0_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c deleted file mode 100644 index 58808814d8fb..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "cyan_skillfish_ip_offset.h" - -int cyan_skillfish_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the blocke needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index b200b9e722d9..8318ee8339f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2092,22 +2092,18 @@ static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder) return 1; else return 0; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: return 6; - break; default: DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 14514a145c17..43c5e3ec9a39 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -637,6 +637,36 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, } } +static bool df_v3_6_query_ras_poison_mode(struct amdgpu_device *adev) +{ + uint32_t hw_assert_msklo, hw_assert_mskhi; + uint32_t v0, v1, v28, v31; + + hw_assert_msklo = RREG32_SOC15(DF, 0, + mmDF_CS_UMC_AON0_HardwareAssertMaskLow); + hw_assert_mskhi = RREG32_SOC15(DF, 0, + mmDF_NCS_PG0_HardwareAssertMaskHigh); + + v0 = REG_GET_FIELD(hw_assert_msklo, + DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0); + v1 = REG_GET_FIELD(hw_assert_msklo, + DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1); + v28 = REG_GET_FIELD(hw_assert_mskhi, + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28); + v31 = REG_GET_FIELD(hw_assert_mskhi, + DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31); + + if (v0 && v1 && v28 && v31) + return true; + else if (!v0 && !v1 && !v28 && !v31) + return false; + else { + dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n", + v0, v1, v28, v31); + return false; + } +} + const struct amdgpu_df_funcs df_v3_6_funcs = { .sw_init = df_v3_6_sw_init, .sw_fini = df_v3_6_sw_fini, @@ -651,4 +681,5 @@ const struct amdgpu_df_funcs df_v3_6_funcs = { .pmc_get_count = df_v3_6_pmc_get_count, .get_fica = df_v3_6_get_fica, .set_fica = 
df_v3_6_set_fica, + .query_ras_poison_mode = df_v3_6_query_ras_poison_mode, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 16dbe593cba2..dbe7442fb25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -270,25 +270,6 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin"); MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin"); MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin"); -static const struct soc15_reg_golden golden_settings_gc_10_0[] = -{ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), - /* TA_GRAD_ADJ_UCONFIG -> TA_GRAD_ADJ */ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382), - /* VGT_TF_RING_SIZE_UMD -> VGT_TF_RING_SIZE */ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2262c24e), - /* VGT_HS_OFFCHIP_PARAM_UMD -> VGT_HS_OFFCHIP_PARAM */ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226cc24f), - /* VGT_TF_MEMORY_BASE_UMD -> VGT_TF_MEMORY_BASE */ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226ec250), - /* VGT_TF_MEMORY_BASE_HI_UMD -> VGT_TF_MEMORY_BASE_HI */ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2278c261), - /* VGT_ESGS_RING_SIZE_UMD -> VGT_ESGS_RING_SIZE */ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2232c240), - /* VGT_GSVS_RING_SIZE_UMD -> VGT_GSVS_RING_SIZE */ - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2233c241), -}; - static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), @@ -1537,7 +1518,7 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32 scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4; - if (adev->asic_type >= CHIP_SIENNA_CICHLID) { + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX] + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4; @@ -3727,18 +3708,18 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): soc15_program_register_sequence(adev, golden_settings_gc_rlc_spm_10_0_nv10, (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10)); break; - case CHIP_NAVI14: + case IP_VERSION(10, 1, 1): soc15_program_register_sequence(adev, golden_settings_gc_rlc_spm_10_1_nv14, (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14)); break; - case CHIP_NAVI12: + case IP_VERSION(10, 1, 2): soc15_program_register_sequence(adev, golden_settings_gc_rlc_spm_10_1_2_nv12, (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12)); @@ -3750,8 +3731,8 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev) static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): soc15_program_register_sequence(adev, golden_settings_gc_10_1, (const u32)ARRAY_SIZE(golden_settings_gc_10_1)); @@ -3759,7 +3740,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_0_nv10, (const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10)); break; - case CHIP_NAVI14: + case 
IP_VERSION(10, 1, 1): soc15_program_register_sequence(adev, golden_settings_gc_10_1_1, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_1)); @@ -3767,7 +3748,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_1_nv14, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14)); break; - case CHIP_NAVI12: + case IP_VERSION(10, 1, 2): soc15_program_register_sequence(adev, golden_settings_gc_10_1_2, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2)); @@ -3775,7 +3756,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_1_2_nv12, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12)); break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(10, 3, 0): soc15_program_register_sequence(adev, golden_settings_gc_10_3, (const u32)ARRAY_SIZE(golden_settings_gc_10_3)); @@ -3783,35 +3764,32 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_3_sienna_cichlid, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_sienna_cichlid)); break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(10, 3, 2): soc15_program_register_sequence(adev, golden_settings_gc_10_3_2, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_2)); break; - case CHIP_VANGOGH: + case IP_VERSION(10, 3, 1): soc15_program_register_sequence(adev, golden_settings_gc_10_3_vangogh, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_vangogh)); break; - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 3): soc15_program_register_sequence(adev, golden_settings_gc_10_3_3, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_3)); break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(10, 3, 4): soc15_program_register_sequence(adev, golden_settings_gc_10_3_4, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4)); break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(10, 3, 5): soc15_program_register_sequence(adev, golden_settings_gc_10_3_5, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_5)); break; - case CHIP_CYAN_SKILLFISH: - soc15_program_register_sequence(adev, - golden_settings_gc_10_0, - (const u32)ARRAY_SIZE(golden_settings_gc_10_0)); + case IP_VERSION(10, 1, 3): soc15_program_register_sequence(adev, golden_settings_gc_10_0_cyan_skillfish, (const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish)); @@ -3985,11 +3963,11 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) { adev->gfx.cp_fw_write_wait = false; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_CYAN_SKILLFISH: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 3): if ((adev->gfx.me_fw_version >= 0x00000046) && (adev->gfx.me_feature_version >= 27) && (adev->gfx.pfp_fw_version >= 0x00000068) && @@ -3998,12 +3976,12 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 27)) adev->gfx.cp_fw_write_wait = true; break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfx.cp_fw_write_wait = true; break; default: @@ -4066,8 +4044,8 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev) static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) { - switch 
(adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; break; @@ -4093,38 +4071,38 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(10, 1, 1): chip_name = "navi14"; if (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)) wks = "_wks"; break; - case CHIP_NAVI12: + case IP_VERSION(10, 1, 2): chip_name = "navi12"; break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(10, 3, 0): chip_name = "sienna_cichlid"; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(10, 3, 2): chip_name = "navy_flounder"; break; - case CHIP_VANGOGH: + case IP_VERSION(10, 3, 1): chip_name = "vangogh"; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(10, 3, 4): chip_name = "dimgrey_cavefish"; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(10, 3, 5): chip_name = "beige_goby"; break; - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 3): chip_name = "yellow_carp"; break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(10, 1, 3): if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) chip_name = "cyan_skillfish2"; else @@ -4684,10 +4662,10 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.funcs = &gfx_v10_0_gfx_funcs; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -4695,12 +4673,12 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -4710,7 +4688,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.gb_addr_config_fields.num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(10, 1, 3): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -4818,11 +4796,11 @@ static int gfx_v10_0_sw_init(void *handle) struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_CYAN_SKILLFISH: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; @@ -4830,12 +4808,12 @@ static int gfx_v10_0_sw_init(void *handle) 
adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; @@ -5068,8 +5046,8 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev) for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { bitmap = i * adev->gfx.config.max_sh_per_se + j; - if (((adev->asic_type == CHIP_SIENNA_CICHLID) || - (adev->asic_type == CHIP_YELLOW_CARP)) && + if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); @@ -5096,7 +5074,7 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade /* for ASICs that integrates GFX v10.3 * pa_sc_tile_steering_override should be set to 0 */ - if (adev->asic_type >= CHIP_SIENNA_CICHLID) + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) return 0; /* init num_sc */ @@ -5249,7 +5227,7 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) /* TCCs are global (not instanced). */ uint32_t tcc_disable; - if (adev->asic_type >= CHIP_SIENNA_CICHLID) { + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) | RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3); } else { @@ -5326,7 +5304,7 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev) adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); /* csib */ - if (adev->asic_type == CHIP_NAVI12) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO, @@ -5948,7 +5926,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 
0 : 1); - if (adev->asic_type == CHIP_NAVI12) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp); } else { WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); @@ -6337,13 +6315,13 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, } WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); } - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index); WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); @@ -6474,13 +6452,13 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev) static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) { if (enable) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0); break; default: @@ -6488,13 +6466,13 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) break; } } else { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); @@ -6586,13 +6564,13 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; /* tell RLC which is KIQ queue */ - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid); tmp &= 0xffffff00; tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); @@ -7303,11 +7281,11 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) /* check if mmVGT_ESGS_RING_SIZE_UMD * has been remapped to mmVGT_ESGS_RING_SIZE */ - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid); WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0); WREG32_SOC15(GC, 0, 
mmVGT_ESGS_RING_SIZE_UMD, pattern); @@ -7320,8 +7298,8 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) return false; } break; - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): return true; default: data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE); @@ -7350,13 +7328,13 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) * index will auto-inc after each data writting */ WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) << GRBM_CAM_DATA__CAM_ADDR__SHIFT) | @@ -7520,19 +7498,19 @@ static int gfx_v10_0_hw_init(void *handle) * init golden registers and rlc resume may override some registers, * reconfig them here */ - if (adev->asic_type == CHIP_NAVI10 || - adev->asic_type == CHIP_NAVI14 || - adev->asic_type == CHIP_NAVI12) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) gfx_v10_0_tcp_harvest(adev); r = gfx_v10_0_cp_resume(adev); if (r) return r; - if (adev->asic_type == CHIP_SIENNA_CICHLID) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) gfx_v10_3_program_pbb_mode(adev); - if (adev->asic_type >= CHIP_SIENNA_CICHLID) + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) gfx_v10_3_set_power_brake_sequence(adev); return r; @@ -7584,7 +7562,7 @@ static int gfx_v10_0_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) { gfx_v10_0_cp_gfx_enable(adev, false); /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ - if (adev->asic_type >= CHIP_SIENNA_CICHLID) { + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid); tmp &= 0xffffff00; WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp); @@ -7670,13 +7648,13 @@ static int gfx_v10_0_soft_reset(void *handle) /* GRBM_STATUS2 */ tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid)) grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, @@ -7726,11 +7704,22 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock, clock_lo, clock_hi, hi_check; - switch (adev->asic_type) { - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | - ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, 
mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); break; default: preempt_disable(); @@ -7784,19 +7773,19 @@ static int gfx_v10_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_CYAN_SKILLFISH: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X; break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid; break; default: @@ -7848,13 +7837,13 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev) data = RLC_SAFE_MODE__CMD_MASK; data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data); /* wait for RLC_SAFE_MODE */ @@ -7884,13 +7873,13 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev) uint32_t data; data = RLC_SAFE_MODE__CMD_MASK; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data); break; default: @@ -8193,7 +8182,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d mmCGTS_SA1_QUAD1_SM_CTRL_REG }; - if (adev->asic_type == CHIP_NAVI12) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) { for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) { reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] + tcp_ctrl_regs_nv12[i]; @@ -8238,8 +8227,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, /* === CGCG + CGLS === */ gfx_v10_0_update_coarse_grain_clock_gating(adev, enable); - if ((adev->asic_type >= CHIP_NAVI10) && - (adev->asic_type <= CHIP_NAVI12)) + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))) 
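
The clock-counter hunk above reads a split 64-bit counter with the classic high/low/high sequence: if the high half changed between the reads, the low half wrapped and must be re-read. At the stated 100 MHz the 32-bit low word carries roughly every 42 seconds, and the driver additionally disables preemption so the three reads stay close together. A generic sketch (read_hi()/read_lo() stand in for the RREG32_SOC15_NO_KIQ() accesses):

        static uint64_t read_counter64(void)
        {
                uint32_t hi = read_hi();        /* placeholders for the */
                uint32_t lo = read_lo();        /* paired register reads */
                uint32_t hi2 = read_hi();

                if (hi2 != hi) {        /* low word wrapped between reads */
                        lo = read_lo();
                        hi = hi2;
                }
                return ((uint64_t)hi << 32) | lo;
        }
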
gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev); } else { /* CGCG/CGLS should be disabled before MGCG/MGLS @@ -8270,6 +8260,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; + + amdgpu_gfx_off_ctrl(adev, false); + /* not for *_SOC15 */ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) @@ -8284,6 +8277,8 @@ static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); } static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev, @@ -8335,15 +8330,12 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) * Power/performance team will optimize it and might give a new value later. */ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { - switch (adev->asic_type) { - case CHIP_VANGOGH: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); break; - case CHIP_YELLOW_CARP: - data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; - WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); - break; default: break; } @@ -8399,18 +8391,18 @@ static int gfx_v10_0_set_powergating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): amdgpu_gfx_off_ctrl(adev, enable); break; - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): gfx_v10_cntl_pg(adev, enable); amdgpu_gfx_off_ctrl(adev, enable); break; @@ -8428,16 +8420,16 @@ static int gfx_v10_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): gfx_v10_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; @@ -9541,19 +9533,19 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + 
case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs; break; - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 3, 0): adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov; break; default: @@ -9641,8 +9633,8 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { bitmap = i * adev->gfx.config.max_sh_per_se + j; - if (((adev->asic_type == CHIP_SIENNA_CICHLID) || - (adev->asic_type == CHIP_YELLOW_CARP)) && + if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) || + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; mask = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 37b4a3db6360..d17a6f399347 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3575,12 +3575,16 @@ static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 data; + amdgpu_gfx_off_ctrl(adev, false); + data = RREG32(mmRLC_SPM_VMID); data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; WREG32(mmRLC_SPM_VMID, data); + + amdgpu_gfx_off_ctrl(adev, true); } static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e0302c23e9a7..5f112efda634 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5624,6 +5624,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 data; + amdgpu_gfx_off_ctrl(adev, false); + if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(mmRLC_SPM_VMID); else @@ -5636,6 +5638,8 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_NO_KIQ(mmRLC_SPM_VMID, data); else WREG32(mmRLC_SPM_VMID, data); + + amdgpu_gfx_off_ctrl(adev, true); } static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 603c259b073b..b305fd39874f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -953,8 +958,8 @@ static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): soc15_program_register_sequence(adev, golden_settings_gc_9_0, ARRAY_SIZE(golden_settings_gc_9_0)); @@ -962,7 +967,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_0_vg10, ARRAY_SIZE(golden_settings_gc_9_0_vg10)); break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): soc15_program_register_sequence(adev, golden_settings_gc_9_2_1, 
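
The update_spm_vmid() hunks for gfx v7/v8 above (and v10 earlier) all gain the same bracketing: GFXOFF is disallowed for the duration of the register update so the MMIO access cannot land on a powered-down GFX block. The shape, using only calls that appear in these hunks:

        amdgpu_gfx_off_ctrl(adev, false);       /* keep GFX awake */

        data = RREG32(mmRLC_SPM_VMID);
        data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK)
                << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
        WREG32(mmRLC_SPM_VMID, data);

        amdgpu_gfx_off_ctrl(adev, true);        /* re-allow GFXOFF */
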
ARRAY_SIZE(golden_settings_gc_9_2_1)); @@ -970,7 +975,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_2_1_vg12, ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): soc15_program_register_sequence(adev, golden_settings_gc_9_0, ARRAY_SIZE(golden_settings_gc_9_0)); @@ -978,12 +983,13 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_0_vg20, ARRAY_SIZE(golden_settings_gc_9_0_vg20)); break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): soc15_program_register_sequence(adev, golden_settings_gc_9_4_1_arct, ARRAY_SIZE(golden_settings_gc_9_4_1_arct)); break; - case CHIP_RAVEN: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): soc15_program_register_sequence(adev, golden_settings_gc_9_1, ARRAY_SIZE(golden_settings_gc_9_1)); if (adev->apu_flags & AMD_APU_IS_RAVEN2) @@ -995,12 +1001,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_1_rv1, ARRAY_SIZE(golden_settings_gc_9_1_rv1)); break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): soc15_program_register_sequence(adev, golden_settings_gc_9_1_rn, ARRAY_SIZE(golden_settings_gc_9_1_rn)); return; /* for renoir, don't need common goldensetting */ - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): gfx_v9_4_2_init_golden_registers(adev, adev->smuio.funcs->get_die_id(adev)); break; @@ -1008,8 +1014,8 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) break; } - if ((adev->asic_type != CHIP_ARCTURUS) && - (adev->asic_type != CHIP_ALDEBARAN)) + if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && + (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))) soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); } @@ -1193,15 +1199,15 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) adev->gfx.me_fw_write_wait = false; adev->gfx.mec_fw_write_wait = false; - if ((adev->asic_type != CHIP_ARCTURUS) && + if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) && ((adev->gfx.mec_fw_version < 0x000001a5) || (adev->gfx.mec_feature_version < 46) || (adev->gfx.pfp_fw_version < 0x000000b7) || (adev->gfx.pfp_feature_version < 46))) DRM_WARN_ONCE("CP firmware version too old, please update!"); - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 42) && (adev->gfx.pfp_fw_version >= 0x000000b1) && @@ -1212,7 +1218,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 42)) adev->gfx.mec_fw_write_wait = true; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 44) && (adev->gfx.pfp_fw_version >= 0x000000b2) && @@ -1223,7 +1229,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 44)) adev->gfx.mec_fw_write_wait = true; break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 44) && (adev->gfx.pfp_fw_version >= 0x000000b2) && @@ -1234,7 +1240,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) (adev->gfx.mec_feature_version >= 44)) adev->gfx.mec_fw_write_wait = true; break; - case CHIP_RAVEN: + case IP_VERSION(9, 1, 0): + case 
IP_VERSION(9, 2, 2): if ((adev->gfx.me_fw_version >= 0x0000009c) && (adev->gfx.me_feature_version >= 42) && (adev->gfx.pfp_fw_version >= 0x000000b1) && @@ -1297,7 +1304,7 @@ static bool is_raven_kicker(struct amdgpu_device *adev) static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev) { - if ((adev->asic_type == CHIP_RENOIR) && + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) && (adev->gfx.me_fw_version >= 0x000000a5) && (adev->gfx.me_feature_version >= 52)) return true; @@ -1310,12 +1317,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): break; - case CHIP_RAVEN: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) || (adev->apu_flags & AMD_APU_IS_PICASSO)) && ((!is_raven_kicker(adev) && @@ -1329,7 +1337,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): if (adev->pm.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_CP | @@ -1553,9 +1561,9 @@ out: static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_ALDEBARAN || - adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_RENOIR) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) return false; return true; @@ -1663,17 +1671,18 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): chip_name = "vega10"; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): chip_name = "vega12"; break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): chip_name = "vega20"; break; - case CHIP_RAVEN: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): if (adev->apu_flags & AMD_APU_IS_RAVEN2) chip_name = "raven2"; else if (adev->apu_flags & AMD_APU_IS_PICASSO) @@ -1681,16 +1690,16 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) else chip_name = "raven"; break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): chip_name = "arcturus"; break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): if (adev->apu_flags & AMD_APU_IS_RENOIR) chip_name = "renoir"; else chip_name = "green_sardine"; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): chip_name = "aldebaran"; break; default: @@ -1794,7 +1803,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) always_on_cu_num = 4; - else if (adev->asic_type == CHIP_VEGA12) + else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1)) always_on_cu_num = 8; else always_on_cu_num = 12; @@ -1963,11 +1972,12 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) return r; } - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): gfx_v9_0_init_lbpw(adev); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): gfx_v9_4_init_lbpw(adev); break; default: @@ -2142,8 +2152,8 @@ static 
int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.funcs = &gfx_v9_0_gfx_funcs; - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2151,7 +2161,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2160,7 +2170,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; DRM_INFO("fix gfx.config for vega12\n"); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; @@ -2175,7 +2185,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) if (err) return err; break; - case CHIP_RAVEN: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2186,7 +2197,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) else gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; @@ -2197,7 +2208,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config &= ~0xf3e777ff; gb_addr_config |= 0x22014042; break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2207,7 +2218,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config &= ~0xf3e777ff; gb_addr_config |= 0x22010042; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; @@ -2305,14 +2316,15 @@ static int gfx_v9_0_sw_init(void *handle) struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - case CHIP_ALDEBARAN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 2): adev->gfx.mec.num_mec = 2; break; default: @@ -2455,7 +2467,9 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); - amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); if (adev->flags & AMD_IS_APU) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, @@ -2596,8 +2610,8 @@ static void gfx_v9_0_init_sq_config(struct 
amdgpu_device *adev) { uint32_t tmp; - switch (adev->asic_type) { - case CHIP_ARCTURUS: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 4, 1): tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG); tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT, 1); @@ -2932,7 +2946,7 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev) /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */ data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data); - if (adev->asic_type != CHIP_RENOIR) + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0)) pwr_10_0_gfxip_control_over_cgpg(adev, true); } } @@ -3044,7 +3058,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. */ if (adev->gfx.rlc.is_rlc_v2_1) { - if (adev->asic_type == CHIP_VEGA12 || + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) || (adev->apu_flags & AMD_APU_IS_RAVEN2)) gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); @@ -3157,14 +3171,15 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): if (amdgpu_lbpw == 0) gfx_v9_0_enable_lbpw(adev, false); else gfx_v9_0_enable_lbpw(adev, true); break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): if (amdgpu_lbpw > 0) gfx_v9_0_enable_lbpw(adev, true); else @@ -3599,7 +3614,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) /* set static priority for a queue/ring */ gfx_v9_0_mqd_set_priority(ring, mqd); - mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); + mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM); /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. @@ -3959,8 +3974,8 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev) { u32 tmp; - if (adev->asic_type != CHIP_ARCTURUS && - adev->asic_type != CHIP_ALDEBARAN) + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) && + adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)) return; tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG); @@ -4000,7 +4015,7 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - if (adev->asic_type == CHIP_ALDEBARAN) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) gfx_v9_4_2_set_power_brake_sequence(adev); return r; @@ -4045,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); - /* Skip suspend with A+A reset */ - if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { - dev_dbg(adev->dev, "Device in reset. 
Skipping RLC halt\n"); + /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */ + if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) || + (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) { + dev_dbg(adev->dev, "Skipping RLC halt\n"); return 0; } @@ -4228,19 +4244,38 @@ failed_kiq_read: static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { - uint64_t clock; + uint64_t clock, clock_lo, clock_hi, hi_check; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); - if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { - clock = gfx_v9_0_kiq_read_clock(adev); - } else { - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 3, 0): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; + default: + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { + clock = gfx_v9_0_kiq_read_clock(adev); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); + break; } - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); return clock; } @@ -4582,7 +4617,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) if (!ring->sched.ready) return 0; - if (adev->asic_type == CHIP_ARCTURUS) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus; vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus); vgpr_init_regs_ptr = vgpr_init_regs_arcturus; @@ -4732,8 +4767,8 @@ static int gfx_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) adev->gfx.num_gfx_rings = 0; else adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; @@ -4767,7 +4802,7 @@ static int gfx_v9_0_ecc_late_init(void *handle) } /* requires IBs so do in late init after IB pool is initialized */ - if (adev->asic_type == CHIP_ALDEBARAN) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) r = gfx_v9_4_2_do_edc_gpr_workarounds(adev); else r = gfx_v9_0_do_edc_gpr_workarounds(adev); @@ -4895,7 +4930,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* 1 - RLC_CGTT_MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); - if (adev->asic_type != CHIP_VEGA12) + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) 
data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | @@ -4929,7 +4964,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev /* 1 - MGCG_OVERRIDE */ def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); - if (adev->asic_type != CHIP_VEGA12) + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1)) data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | @@ -5035,7 +5070,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev /* enable cgcg FSM(0x0000363F) */ def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); - if (adev->asic_type == CHIP_ARCTURUS) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; else @@ -5094,6 +5129,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; + amdgpu_gfx_off_ctrl(adev, false); + reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); @@ -5107,6 +5144,8 @@ static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data); else WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data); + + amdgpu_gfx_off_ctrl(adev, true); } static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev, @@ -5161,9 +5200,10 @@ static int gfx_v9_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_PG_STATE_GATE); - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 3, 0): if (!enable) amdgpu_gfx_off_ctrl(adev, false); @@ -5189,7 +5229,7 @@ static int gfx_v9_0_set_powergating_state(void *handle, if (enable) amdgpu_gfx_off_ctrl(adev, true); break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): amdgpu_gfx_off_ctrl(adev, enable); break; default: @@ -5207,14 +5247,15 @@ static int gfx_v9_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - case CHIP_ALDEBARAN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 2): gfx_v9_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; @@ -5256,7 +5297,7 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) { /* AMD_CG_SUPPORT_GFX_3D_CGCG */ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) @@ -7027,14 +7068,15 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case 
CHIP_RENOIR: - case CHIP_ALDEBARAN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 2): adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; default: @@ -7045,17 +7087,18 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) { /* init ASIC gds info */ - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): adev->gds.gds_size = 0x10000; break; - case CHIP_RAVEN: - case CHIP_ARCTURUS: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 4, 1): adev->gds.gds_size = 0x1000; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): /* aldebaran removed all the GDS internal memory, * only support GWS opcode in kernel, like barrier * semaphore, etc. */ adev->gds.gds_size = 0; break; default: adev->gds.gds_size = 0x10000; break; } - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA20: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 4, 0): adev->gds.gds_compute_max_wave_id = 0x7ff; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): adev->gds.gds_compute_max_wave_id = 0x27f; break; - case CHIP_RAVEN: + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): if (adev->apu_flags & AMD_APU_IS_RAVEN2) adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */ else adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */ break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): adev->gds.gds_compute_max_wave_id = 0xfff; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): /* deprecated for Aldebaran, no usage at all */ adev->gds.gds_compute_max_wave_id = 0; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 00a2b36a24b3..c4f37a161875 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -706,6 +706,11 @@ int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev) if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return 0; + /* Workaround for ALDEBARAN, skip GPRs init in GPU reset. Will remove it once GPRs init algorithm works for all CU settings. 
*/ + if (amdgpu_in_reset(adev)) + return 0; + gfx_v9_4_2_do_sgprs_init(adev); gfx_v9_4_2_do_vgprs_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index bda1542ef1dd..480e41847d7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -348,6 +348,10 @@ static void gfxhub_v1_0_gart_disable(struct amdgpu_device *adev) WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_CNTL, i * hub->ctx_distance, 0); + if (amdgpu_sriov_vf(adev)) + /* Avoid write to GMC registers */ + return; + /* Setup TLB control */ tmp = RREG32_SOC15(GC, 0, mmMC_VM_MX_L1_TLB_CNTL); tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c index 497b86c376c6..90f0aefbdb39 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c @@ -54,15 +54,17 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) seg_size = REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE), MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, PF_MAX_REGION); } else { xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL); seg_size = REG_GET_FIELD( RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE), MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24; + max_region = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); } - max_region = - REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION); switch (adev->asic_type) { @@ -89,9 +91,15 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev) if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes) return -EINVAL; - adev->gmc.xgmi.physical_node_id = - REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, - PF_LFB_REGION); + if (adev->asic_type == CHIP_ALDEBARAN) { + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL_ALDE, + PF_LFB_REGION); + } else { + adev->gmc.xgmi.physical_node_id = + REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, + PF_LFB_REGION); + } if (adev->gmc.xgmi.physical_node_id > max_physical_node_id) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index 1a374ec0514a..e80d1dc43079 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -506,8 +506,8 @@ static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev) u32 max_num_physical_nodes = 0; u32 max_physical_node_id = 0; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: + switch (adev->ip_versions[XGMI_HWIP][0]) { + case IP_VERSION(4, 8, 0): max_num_physical_nodes = 4; max_physical_node_id = 3; break; @@ -544,7 +544,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines); - if (adev->asic_type == CHIP_YELLOW_CARP) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) { /* Get SA disabled bitmap from eFuse setting */ efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE); efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 41c3a0d70b7c..3ec5ff5a6dbe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -133,7 +133,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev, * the new fast GRBM 
interface. */ if ((entry->vmid_src == AMDGPU_GFXHUB_0) && - (adev->asic_type < CHIP_SIENNA_CICHLID)) + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); @@ -268,7 +268,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * to avoid a false ACK due to the new fast GRBM interface. */ if ((vmhub == AMDGPU_GFXHUB_0) && - (adev->asic_type < CHIP_SIENNA_CICHLID)) + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0))) RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, hub_ip); @@ -657,8 +657,8 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: + switch (adev->ip_versions[UMC_HWIP][0]) { + case IP_VERSION(8, 7, 0): adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM; @@ -674,9 +674,9 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 4, 0): adev->mmhub.funcs = &mmhub_v2_3_funcs; break; default: @@ -687,13 +687,13 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->gfxhub.funcs = &gfxhub_v2_1_funcs; break; default: @@ -800,23 +800,9 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) adev->gmc.visible_vram_size = adev->gmc.real_vram_size; /* set the gart size */ - if (amdgpu_gart_size == -1) { - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - default: - adev->gmc.gart_size = 512ULL << 20; - break; - } - } else + if (amdgpu_gart_size == -1) + adev->gmc.gart_size = 512ULL << 20; + else adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; gmc_v10_0_vram_gtt_location(adev, &adev->gmc); @@ -871,17 +857,17 @@ static int gmc_v10_0_sw_init(void *handle) adev->gmc.vram_vendor = vram_vendor; } - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 1): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 0): + case IP_VERSION(10, 3, 2): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 4): + case IP_VERSION(10, 3, 5): + case IP_VERSION(10, 3, 3): adev->num_vmhubs = 2; /* * To fulfill 4-level page support, @@ -989,21 
+975,6 @@ static int gmc_v10_0_sw_fini(void *handle) static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - break; - default: - break; - } } /** @@ -1098,6 +1069,8 @@ static int gmc_v10_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v10_0_gart_disable(adev); + if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); @@ -1106,7 +1079,6 @@ static int gmc_v10_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - gmc_v10_0_gart_disable(adev); return 0; } @@ -1161,8 +1133,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle, if (r) return r; - if (adev->asic_type >= CHIP_SIENNA_CICHLID && - adev->asic_type <= CHIP_YELLOW_CARP) + if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) return athub_v2_1_set_clockgating(adev, state); else return athub_v2_0_set_clockgating(adev, state); @@ -1174,8 +1145,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags) adev->mmhub.funcs->get_clockgating(adev, flags); - if (adev->asic_type >= CHIP_SIENNA_CICHLID && - adev->asic_type <= CHIP_YELLOW_CARP) + if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0)) athub_v2_1_get_clockgating(adev, flags); else athub_v2_0_get_clockgating(adev, flags); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 0e81e03e9b49..0fe714f54cca 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -841,12 +841,12 @@ static int gmc_v6_0_sw_init(void *handle) adev->gmc.mc_mask = 0xffffffffffULL; - r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44)); + r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); if (r) { dev_warn(adev->dev, "No suitable DMA available.\n"); return r; } - adev->need_swiotlb = drm_need_swiotlb(44); + adev->need_swiotlb = drm_need_swiotlb(40); r = gmc_v6_0_init_microcode(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d90c16a6b2b8..cb82404df534 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -579,7 +579,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, * the new fast GRBM interface. 
*/ if ((entry->vmid_src == AMDGPU_GFXHUB_0) && - (adev->asic_type < CHIP_ALDEBARAN)) + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32(hub->vm_l2_pro_fault_status); status = RREG32(hub->vm_l2_pro_fault_status); @@ -597,26 +597,28 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, gfxhub_client_ids[cid], cid); } else { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 0, 0): mmhub_cid = mmhub_client_ids_vega10[cid][rw]; break; - case CHIP_VEGA12: + case IP_VERSION(9, 3, 0): mmhub_cid = mmhub_client_ids_vega12[cid][rw]; break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): mmhub_cid = mmhub_client_ids_vega20[cid][rw]; break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): mmhub_cid = mmhub_client_ids_arcturus[cid][rw]; break; - case CHIP_RAVEN: + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 0): mmhub_cid = mmhub_client_ids_raven[cid][rw]; break; - case CHIP_RENOIR: + case IP_VERSION(1, 5, 0): + case IP_VERSION(2, 4, 0): mmhub_cid = mmhub_client_ids_renoir[cid][rw]; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): mmhub_cid = mmhub_client_ids_aldebaran[cid][rw]; break; default: @@ -694,7 +696,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { - if (adev->asic_type == CHIP_ALDEBARAN) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) return false; return ((vmhub == AMDGPU_MMHUB_0 || @@ -745,7 +747,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, hub = &adev->vmhub[vmhub]; if (adev->gmc.xgmi.num_physical_nodes && - adev->asic_type == CHIP_VEGA20) { + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) { /* Vega20+XGMI caches PTEs in TC and TLB. Add a * heavy-weight TLB flush (type 2), which flushes * both. Due to a race condition with concurrent @@ -808,7 +810,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * GRBM interface. */ if ((vmhub == AMDGPU_GFXHUB_0) && - (adev->asic_type < CHIP_ALDEBARAN)) + (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2))) RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng); @@ -874,7 +876,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, * still need a second TLB flush after this. 
*/ bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes && - adev->asic_type == CHIP_VEGA20); + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)); /* 2 dwords flush + 8 dwords fence */ unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8; @@ -1088,13 +1090,13 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, *flags &= ~AMDGPU_PTE_VALID; } - if ((adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) && + if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) && !(*flags & AMDGPU_PTE_SYSTEM) && mapping->bo_va->is_xgmi) *flags |= AMDGPU_PTE_SNOOPED; - if (adev->asic_type == CHIP_ALDEBARAN) + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) *flags |= mapping->flags & AMDGPU_PTE_SNOOPED; } @@ -1108,9 +1110,10 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) } else { u32 viewport; - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(2, 1, 0): viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); size = (REG_GET_FIELD(viewport, HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * @@ -1118,9 +1121,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * 4); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: default: viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * @@ -1151,11 +1151,11 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[UMC_HWIP][0]) { + case IP_VERSION(6, 0, 0): adev->umc.funcs = &umc_v6_0_funcs; break; - case CHIP_VEGA20: + case IP_VERSION(6, 1, 1): adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; @@ -1163,7 +1163,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; adev->umc.ras_funcs = &umc_v6_1_ras_funcs; break; - case CHIP_ARCTURUS: + case IP_VERSION(6, 1, 2): adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; @@ -1171,7 +1171,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; adev->umc.ras_funcs = &umc_v6_1_ras_funcs; break; - case CHIP_ALDEBARAN: + case IP_VERSION(6, 7, 0): adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; @@ -1190,11 +1190,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_ARCTURUS: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 4, 1): adev->mmhub.funcs = &mmhub_v9_4_funcs; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): adev->mmhub.funcs = &mmhub_v1_7_funcs; break; default: @@ -1205,14 +1205,14 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) 
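
The switch statements above now key on adev->ip_versions[...][0] rather than adev->asic_type, and several sites compare versions with < or >=. That only works because the IP version packs major, minor and revision into a single integer in order of significance; the sketch below mirrors the upstream IP_VERSION() packing as an assumption, to show why ordered comparisons are safe:

#include <assert.h>
#include <stdint.h>

/* Assumed layout: major in bits 16+, minor in bits 8-15, revision in
 * bits 0-7, matching the driver's IP_VERSION() macro. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	uint32_t renoir_gc = IP_VERSION(9, 3, 0);	/* 0x090300 */
	uint32_t aldebaran_gc = IP_VERSION(9, 4, 2);	/* 0x090402 */

	/* Packing preserves numeric order, so range checks such as
	 * "version >= IP_VERSION(9, 4, 2)" behave as expected. */
	assert(aldebaran_gc > renoir_gc);
	assert(IP_VERSION(10, 3, 0) > IP_VERSION(9, 4, 2));
	return 0;
}
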
static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA20: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 4, 0): adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs; break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs; break; default: @@ -1233,8 +1233,9 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_ALDEBARAN: + /* is UMC the right IP to check for MCA? Maybe DF? */ + switch (adev->ip_versions[UMC_HWIP][0]) { + case IP_VERSION(6, 7, 0): if (!adev->gmc.xgmi.connected_to_cpu) adev->mca.funcs = &mca_v3_0_funcs; break; @@ -1247,11 +1248,12 @@ static int gmc_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */ if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->gmc.xgmi.supported = true; - if (adev->asic_type == CHIP_ALDEBARAN) { + if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) { adev->gmc.xgmi.supported = true; adev->gmc.xgmi.connected_to_cpu = adev->smuio.funcs->is_host_gpu_xgmi_supported(adev); @@ -1289,7 +1291,8 @@ static int gmc_v9_0_late_init(void *handle) * Workaround performance drop issue with VBIOS enables partial * writes, while disables HBM ECC for vega10. */ - if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) { + if (!amdgpu_sriov_vf(adev) && + (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) { if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { if (adev->df.funcs->enable_ecc_force_par_wr_rmw) adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); @@ -1393,17 +1396,18 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) /* set the gart size */ if (amdgpu_gart_size == -1) { - switch (adev->asic_type) { - case CHIP_VEGA10: /* all engines support GPUVM */ - case CHIP_VEGA12: /* all engines support GPUVM */ - case CHIP_VEGA20: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): /* all engines support GPUVM */ + case IP_VERSION(9, 2, 1): /* all engines support GPUVM */ + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): default: adev->gmc.gart_size = 512ULL << 20; break; - case CHIP_RAVEN: /* DCE SG support */ - case CHIP_RENOIR: + case IP_VERSION(9, 1, 0): /* DCE SG support */ + case IP_VERSION(9, 2, 2): /* DCE SG support */ + case IP_VERSION(9, 3, 0): adev->gmc.gart_size = 1024ULL << 20; break; } @@ -1464,7 +1468,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) */ static void gmc_v9_0_save_registers(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_RAVEN) + if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); } @@ -1507,8 +1512,9 @@ static int gmc_v9_0_sw_init(void *handle) adev->gmc.vram_type = vram_type; adev->gmc.vram_vendor = vram_vendor; - switch (adev->asic_type) { - case CHIP_RAVEN: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): adev->num_vmhubs = 2; if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { @@ -1520,11 +1526,11 
@@ static int gmc_v9_0_sw_init(void *handle) adev->vm_manager.num_level > 1; } break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RENOIR: - case CHIP_ALDEBARAN: + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 2): adev->num_vmhubs = 2; @@ -1539,7 +1545,7 @@ static int gmc_v9_0_sw_init(void *handle) else amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): adev->num_vmhubs = 3; /* Keep the vm size same with Vega20 */ @@ -1555,7 +1561,7 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; - if (adev->asic_type == CHIP_ARCTURUS) { + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, &adev->gmc.vm_fault); if (r) @@ -1622,8 +1628,8 @@ static int gmc_v9_0_sw_init(void *handle) * for video processing. */ adev->vm_manager.first_kfd_vmid = - (adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8; + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) || + adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8; amdgpu_vm_manager_init(adev); @@ -1649,12 +1655,12 @@ static int gmc_v9_0_sw_fini(void *handle) static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(9, 0, 0): if (amdgpu_sriov_vf(adev)) break; fallthrough; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): soc15_program_register_sequence(adev, golden_settings_mmhub_1_0_0, ARRAY_SIZE(golden_settings_mmhub_1_0_0)); @@ -1662,9 +1668,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_athub_1_0_0, ARRAY_SIZE(golden_settings_athub_1_0_0)); break; - case CHIP_VEGA12: - break; - case CHIP_RAVEN: + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 0): /* TODO for renoir */ soc15_program_register_sequence(adev, golden_settings_athub_1_0_0, @@ -1684,7 +1689,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) */ void gmc_v9_0_restore_registers(struct amdgpu_device *adev) { - if (adev->asic_type == CHIP_RAVEN) { + if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) { WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register); WARN_ON(adev->gmc.sdpif_register != RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0)); @@ -1794,6 +1800,8 @@ static int gmc_v9_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v9_0_gart_disable(adev); + if (amdgpu_sriov_vf(adev)) { /* full access mode, so don't touch any GMC register */ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); @@ -1802,7 +1810,6 @@ static int gmc_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); - gmc_v9_0_gart_disable(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index 74b90cc2bf48..eecfb1545c1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -49,7 +49,7 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (adev->asic_type == CHIP_ALDEBARAN) + if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 
0)) return; if (!ring || !ring->funcs->emit_wreg) @@ -79,7 +79,7 @@ static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev) if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) return; - if (adev->asic_type >= CHIP_ALDEBARAN) + if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0)) WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0); else /*read back hdp ras counter to reset it to 0 */ @@ -91,9 +91,10 @@ static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev, { uint32_t def, data; - if (adev->asic_type == CHIP_VEGA10 || - adev->asic_type == CHIP_VEGA12 || - adev->asic_type == CHIP_RAVEN) { + if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) { def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) @@ -135,8 +136,8 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev, static void hdp_v4_0_init_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_ARCTURUS: + switch (adev->ip_versions[HDP_HWIP][0]) { + case IP_VERSION(4, 2, 1): WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 85967a5570cb..299de1d131d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -32,26 +32,6 @@ #include "vcn/vcn_2_0_0_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" -#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff -#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 -#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a -#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea -#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb -#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf -#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 -#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 -#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec -#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed -#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 -#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 -#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 -#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f - -#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 - static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev); static int jpeg_v2_0_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h index 15a344ed340f..1a03baa59755 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -24,6 +24,26 @@ #ifndef __JPEG_V2_0_H__ #define __JPEG_V2_0_H__ +#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff +#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029 +#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a +#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea +#define 
mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb +#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf +#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9 +#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082 +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed +#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085 +#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 +#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 + void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 46096ad7f0d9..a29c86617fb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -423,6 +423,42 @@ static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) } } +/** + * jpeg_v2_6_dec_ring_insert_start - insert a start command + * + * @ring: amdgpu_ring pointer + * + * Write a start command to the ring. + */ +static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14))); +} + +/** + * jpeg_v2_6_dec_ring_insert_end - insert an end command + * + * @ring: amdgpu_ring pointer + * + * Write an end command to the ring. 
+ */ +static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */ + + amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14))); +} + static bool jpeg_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -633,8 +669,8 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = { .test_ring = amdgpu_jpeg_dec_ring_test_ring, .test_ib = amdgpu_jpeg_dec_ring_test_ib, .insert_nop = jpeg_v2_0_dec_ring_nop, - .insert_start = jpeg_v2_0_dec_ring_insert_start, - .insert_end = jpeg_v2_0_dec_ring_insert_end, + .insert_start = jpeg_v2_6_dec_ring_insert_start, + .insert_end = jpeg_v2_6_dec_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_jpeg_ring_begin_use, .end_use = amdgpu_jpeg_ring_end_use, diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index 058b65730a84..8f7107d392af 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -52,7 +52,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = { .ras_fini = mca_v3_0_mp0_ras_fini, .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MP0, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0, .sysfs_name = "mp0_err_count", }; @@ -79,7 +80,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = { .ras_fini = mca_v3_0_mp1_ras_fini, .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MP1, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1, .sysfs_name = "mp1_err_count", }; @@ -106,7 +108,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = { .ras_fini = mca_v3_0_mpio_ras_fini, .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MPIO, + .ras_block = AMDGPU_RAS_BLOCK__MCA, + .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO, .sysfs_name = "mpio_err_count", }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 7ded6b2f058e..25f8e93e5ec3 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -153,18 +153,16 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, dev_err(adev->dev, "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): mmhub_cid = mmhub_client_ids_navi1x[cid][rw]; break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw]; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(2, 1, 2): mmhub_cid = mmhub_client_ids_beige_goby[cid][rw]; break; default: @@ -571,11 +569,10 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) return; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - 
case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); break; @@ -606,11 +603,10 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); } - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): if (def != data) WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); if (def1 != data1) @@ -633,11 +629,10 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) return; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); break; default: @@ -651,11 +646,10 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; if (def != data) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); break; default: @@ -671,14 +665,12 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): mmhub_v2_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); mmhub_v2_0_update_medium_grain_light_sleep(adev, @@ -698,11 +690,10 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 1, 0): + case IP_VERSION(2, 1, 1): + case IP_VERSION(2, 1, 2): data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 88e457a150e0..a11d60ec6321 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -90,9 +90,9 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev, dev_err(adev->dev, "MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); - switch (adev->asic_type) { - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: + switch 
(adev->ip_versions[MMHUB_HWIP][0]) { + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 4, 0): mmhub_cid = mmhub_client_ids_vangogh[cid][rw]; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 530011622801..38241cf0e1f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -107,7 +107,7 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev, { u32 ih_cntl, ih_rb_cntl; - if (adev->asic_type < CHIP_SIENNA_CICHLID) + if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3)) return; ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2); @@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = RREG32(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); @@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev, tmp = navi10_ih_rb_cntl(ih, tmp); if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); - if (ih == &adev->irq.ih1) { - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + if (ih == &adev->irq.ih1) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); - } if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { @@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; u32 ih_chicken; - u32 tmp; int ret; int i; @@ -332,13 +330,10 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) { if (ih[0]->use_bus_addr) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[OSSSYS_HWIP][0]) { + case IP_VERSION(5, 0, 3): + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 1): ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid); ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); @@ -366,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, ih[0]->doorbell_index); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); - tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, - CLIENT18_IS_STORM_CLIENT, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp); - - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL); - tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp); - pci_set_master(adev->pdev); /* enable interrupts */ @@ -423,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, u32 wptr, tmp; struct amdgpu_ih_regs *ih_regs; - wptr = le32_to_cpu(*ih->wptr_cpu); - ih_regs = &ih->ih_regs; + if (ih == &adev->irq.ih) { + /* Only ring0 supports writeback. On other rings fall back + * to register-based code with overflow checking below. 
+ */ + wptr = le32_to_cpu(*ih->wptr_cpu); - if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) - goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + } + ih_regs = &ih->ih_regs; + + /* Double check that the overflow wasn't already cleared. */ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -516,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t wptr = cpu_to_le32(entry->src_data[0]); - switch (entry->ring_id) { case 1: - *adev->irq.ih1.wptr_cpu = wptr; schedule_work(&adev->irq.ih1_work); break; case 2: - *adev->irq.ih2.wptr_cpu = wptr; schedule_work(&adev->irq.ih2_work); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c deleted file mode 100644 index 88efaecf9f70..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
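
The navi10_ih_get_wptr() change above restricts the writeback fast path to ring 0 and re-reads the overflow bit from the register before taking the recovery path, since the writeback copy can be stale. A rough sketch of that control flow, with every helper hypothetical:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the writeback read, the RREG32_NO_KIQ() register read,
 * the IH_RB_WPTR.RB_OVERFLOW field test and the overflow recovery. */
extern uint32_t read_wptr_writeback(void);
extern uint32_t read_wptr_register(void);
extern bool overflow_bit_set(uint32_t wptr);
extern uint32_t handle_overflow(uint32_t wptr);

static uint32_t get_wptr_sketch(bool is_ring0)
{
	uint32_t wptr;

	if (is_ring0) {
		/* Only ring 0 has a coherent writeback copy. */
		wptr = read_wptr_writeback();
		if (!overflow_bit_set(wptr))
			return wptr;
	}
	/* Double-check via the register: the overflow seen in the
	 * writeback copy may already have been cleared. */
	wptr = read_wptr_register();
	if (!overflow_bit_set(wptr))
		return wptr;
	return handle_overflow(wptr);
}
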
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "navi10_ip_offset.h" - -int navi10_reg_base_init(struct amdgpu_device *adev) -{ - int i; - - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); - } - - return 0; -} - - diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c deleted file mode 100644 index a786d159e5e9..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "navi12_ip_offset.h" - -int navi12_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the blocks needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c deleted file mode 100644 index 4ea1e8fbb601..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
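With navi14's copy of the same table also deleted below, nothing hand-codes these bases any more: the nv.c hunk later in this diff drops nv_reg_base_init() and nv_set_ip_blocks() entirely, and the register bases, instance counts and IP versions are all populated from the IP discovery table instead. That is also why the switch statements throughout this series migrate from CHIP_* ASIC enums to adev->ip_versions[...] lookups. IP_VERSION packs major/minor/revision into a single comparable integer; a sketch of the idiom, with the macro reproduced as it is defined in amdgpu.h at this point in the tree:

	#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

	/* Match a hardware IP revision rather than a marketing name: */
	if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2))
		dev_info(adev->dev, "Arcturus-class SDMA\n");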
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "navi14_ip_offset.h" - -int navi14_reg_base_init(struct amdgpu_device *adev) -{ - int i; - - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); - } - - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index b184b656b9b6..ee7cab37dfd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -53,6 +53,16 @@ #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 +#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. 
Firmware uses this bit internally */ +#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L + static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, @@ -318,6 +328,27 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = { .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK, }; +const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc = { + .ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, + .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK, + .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK, + .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK, +}; + static void nbio_v2_3_init_registers(struct amdgpu_device *adev) { uint32_t def, data; @@ -328,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1 diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h index a43b60acf7f6..6074dd3a1ed8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h @@ -27,6 +27,7 @@ #include "soc15_common.h" extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg; +extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc; extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 0d2d629e2d6a..4bbacf1be25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CI_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 
3c00666a13e1..37a4039fdfc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 8f2a315e7c73..3444332ea110 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); } + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index f50045cebd44..dc5e93756fea 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -56,12 +56,15 @@ * These are nbio v7_4_1 registers mask. Temporarily define these here since * nbio v7_4_1 header is incomplete. */ -#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */ #define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L #define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc #define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2 @@ -334,17 +337,34 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK, .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, - .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK, - .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, - .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, - .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, - .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, - .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, +}; + +const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = { + .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, + .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, + .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, + .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK, + .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK, + .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK, + .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK, + .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK, + .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK, + .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, + .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + 
.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, + .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK, + .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK, + .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK, }; static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) @@ -387,13 +407,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device "errors detected in %s block, " "no user action is needed.\n", obj->err_data.ce_count, - ras_block_str(adev->nbio.ras_if->block)); + get_ras_block_str(adev->nbio.ras_if)); if (err_data.ue_count) dev_info(adev->dev, "%ld uncorrectable hardware " "errors detected in %s block\n", obj->err_data.ue_count, - ras_block_str(adev->nbio.ras_if->block)); + get_ras_block_str(adev->nbio.ras_if)); } dev_info(adev->dev, "RAS controller interrupt triggered " @@ -566,7 +586,9 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a return r; } -#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030 +#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE 0x13b20030 +#define smnRAS_GLOBAL_STATUS_LO_ALDE 0x13b20020 static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) @@ -575,12 +597,20 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, uint32_t corr, fatal, non_fatal; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + if (adev->asic_type == CHIP_ALDEBARAN) + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE); + else + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr); fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal); non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrNonFatal); - parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2); + + if (adev->asic_type == CHIP_ALDEBARAN) + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE); + else + parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2); if (corr) err_data->ce_count++; @@ -589,13 +619,21 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, if (corr || fatal || non_fatal) { central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS); + /* clear error status register */ - WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts); + else + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); if (fatal) + { /* clear parity fatal error indication field */ - WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, - parity_sts); + if (adev->asic_type == CHIP_ALDEBARAN) + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts); + else + WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts); + } if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS, BIFL_RasContller_Intr_Recv)) { @@ -656,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) { uint32_t def, data; + if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4)) + return; + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; data &= 
~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h index b8216581ec8d..cc5692db6f98 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -27,6 +27,7 @@ #include "soc15_common.h" extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; +extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald; extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index ff80786e3918..2ec1ffb36b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -180,8 +180,10 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = { static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (amdgpu_sriov_vf(adev)) { if (encode) *codecs = &sriov_sc_video_codecs_encode; @@ -194,29 +196,27 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, *codecs = &sc_video_codecs_decode; } return 0; - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_VANGOGH: + case IP_VERSION(3, 0, 16): + case IP_VERSION(3, 0, 2): if (encode) *codecs = &nv_video_codecs_encode; else *codecs = &sc_video_codecs_decode; return 0; - case CHIP_YELLOW_CARP: + case IP_VERSION(3, 1, 1): if (encode) *codecs = &nv_video_codecs_encode; else *codecs = &yc_video_codecs_decode; return 0; - case CHIP_BEIGE_GOBY: + case IP_VERSION(3, 0, 33): if (encode) *codecs = &bg_video_codecs_encode; else *codecs = &bg_video_codecs_decode; return 0; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 0, 2): if (encode) *codecs = &nv_video_codecs_encode; else @@ -511,14 +511,15 @@ nv_asic_reset_method(struct amdgpu_device *adev) dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", amdgpu_reset_method); - switch (adev->asic_type) { - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 5, 0): + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): return AMD_RESET_METHOD_MODE2; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): return AMD_RESET_METHOD_MODE1; default: if (amdgpu_dpm_is_baco_supported(adev)) @@ -599,7 +600,7 @@ static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } -static const struct amdgpu_ip_block_version nv_common_ip_block = +const struct amdgpu_ip_block_version nv_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 1, @@ -608,314 +609,11 @@ static const struct amdgpu_ip_block_version nv_common_ip_block = .funcs = &nv_common_ip_funcs, }; -static bool nv_is_headless_sku(struct pci_dev *pdev) -{ - if ((pdev->device == 0x731E && - (pdev->revision == 0xC6 || pdev->revision == 0xC7)) || - (pdev->device == 0x7340 && pdev->revision == 0xC9) || - (pdev->device == 0x7360 && pdev->revision == 0xC7)) - return true; - return false; -} - -static int 
nv_reg_base_init(struct amdgpu_device *adev) -{ - int r; - - if (amdgpu_discovery) { - r = amdgpu_discovery_reg_base_init(adev); - if (r) { - DRM_WARN("failed to init reg base from ip discovery table, " - "fallback to legacy init method\n"); - goto legacy_init; - } - - amdgpu_discovery_harvest_ip(adev); - if (nv_is_headless_sku(adev->pdev)) { - adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; - adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; - } - - return 0; - } - -legacy_init: - switch (adev->asic_type) { - case CHIP_NAVI10: - navi10_reg_base_init(adev); - break; - case CHIP_NAVI14: - navi14_reg_base_init(adev); - break; - case CHIP_NAVI12: - navi12_reg_base_init(adev); - break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - sienna_cichlid_reg_base_init(adev); - break; - case CHIP_VANGOGH: - vangogh_reg_base_init(adev); - break; - case CHIP_DIMGREY_CAVEFISH: - dimgrey_cavefish_reg_base_init(adev); - break; - case CHIP_BEIGE_GOBY: - beige_goby_reg_base_init(adev); - break; - case CHIP_YELLOW_CARP: - yellow_carp_reg_base_init(adev); - break; - case CHIP_CYAN_SKILLFISH: - cyan_skillfish_reg_base_init(adev); - break; - default: - return -EINVAL; - } - - return 0; -} - void nv_set_virt_ops(struct amdgpu_device *adev) { adev->virt.ops = &xgpu_nv_virt_ops; } -int nv_set_ip_blocks(struct amdgpu_device *adev) -{ - int r; - - if (adev->asic_type == CHIP_CYAN_SKILLFISH) { - adev->nbio.funcs = &nbio_v2_3_funcs; - adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - } else if (adev->flags & AMD_IS_APU) { - adev->nbio.funcs = &nbio_v7_2_funcs; - adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg; - } else { - adev->nbio.funcs = &nbio_v2_3_funcs; - adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - } - adev->hdp.funcs = &hdp_v5_0_funcs; - - if (adev->asic_type >= CHIP_SIENNA_CICHLID) - adev->smuio.funcs = &smuio_v11_0_6_funcs; - else - adev->smuio.funcs = &smuio_v11_0_funcs; - - if (adev->asic_type == CHIP_SIENNA_CICHLID) - adev->gmc.xgmi.supported = true; - - /* Set IP register base before any HW register access */ - r = nv_reg_base_init(adev); - if (r) - return r; - - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); - if (adev->enable_mes) - amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); - break; - case CHIP_NAVI12: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - if (!amdgpu_sriov_vf(adev)) { - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - } else 
{ - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - } - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - !amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); - break; - case CHIP_SIENNA_CICHLID: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - if (!amdgpu_sriov_vf(adev)) { - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - } else { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - } - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - if (adev->enable_mes) - amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); - break; - case CHIP_NAVY_FLOUNDER: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - break; - case CHIP_VANGOGH: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - 
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - break; - case CHIP_DIMGREY_CAVEFISH: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); - break; - case CHIP_BEIGE_GOBY: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - break; - case CHIP_YELLOW_CARP: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); - amdgpu_device_ip_block_add(adev, 
&jpeg_v3_0_ip_block); - break; - case CHIP_CYAN_SKILLFISH: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - } - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - break; - default: - return -EINVAL; - } - - return 0; -} - static uint32_t nv_get_rev_id(struct amdgpu_device *adev) { return adev->nbio.funcs->get_rev_id(adev); @@ -1034,8 +732,10 @@ static int nv_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -1056,8 +756,11 @@ static int nv_common_early_init(void *handle) adev->rev_id = nv_get_rev_id(adev); adev->external_rev_id = 0xff; - switch (adev->asic_type) { - case CHIP_NAVI10: + /* TODO: split the GC and PG flags based on the relevant IP version for which + * they are relevant. + */ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 1, 10): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_IH_CG | @@ -1079,7 +782,7 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0x1; break; - case CHIP_NAVI14: + case IP_VERSION(10, 1, 1): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_IH_CG | @@ -1100,7 +803,7 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 20; break; - case CHIP_NAVI12: + case IP_VERSION(10, 1, 2): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CGCG | @@ -1129,7 +832,7 @@ static int nv_common_early_init(void *handle) adev->rev_id = 0; adev->external_rev_id = adev->rev_id + 0xa; break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(10, 3, 0): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | @@ -1153,7 +856,7 @@ static int nv_common_early_init(void *handle) } adev->external_rev_id = adev->rev_id + 0x28; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(10, 3, 2): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | @@ -1172,8 +875,7 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_MMHUB; adev->external_rev_id = adev->rev_id + 0x32; break; - - case CHIP_VANGOGH: + case IP_VERSION(10, 3, 1): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CP_LS | @@ -1196,7 +898,7 @@ static int nv_common_early_init(void *handle) if (adev->apu_flags & AMD_APU_IS_VANGOGH) adev->external_rev_id = adev->rev_id + 0x01; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(10, 3, 4): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 
AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | @@ -1215,7 +917,7 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_MMHUB; adev->external_rev_id = adev->rev_id + 0x3c; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(10, 3, 5): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS | @@ -1232,7 +934,7 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_MMHUB; adev->external_rev_id = adev->rev_id + 0x46; break; - case CHIP_YELLOW_CARP: + case IP_VERSION(10, 3, 3): adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CGCG | @@ -1257,11 +959,11 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; if (adev->pdev->device == 0x1681) - adev->external_rev_id = adev->rev_id + 0x19; + adev->external_rev_id = 0x20; else adev->external_rev_id = adev->rev_id + 0x01; break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(10, 1, 3): adev->cg_flags = 0; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x82; @@ -1333,7 +1035,7 @@ static int nv_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); @@ -1388,14 +1090,14 @@ static int nv_common_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(2, 3, 0): + case IP_VERSION(2, 3, 1): + case IP_VERSION(2, 3, 2): + case IP_VERSION(3, 3, 0): + case IP_VERSION(3, 3, 1): + case IP_VERSION(3, 3, 2): + case IP_VERSION(3, 3, 3): adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 1f40ba3b0460..83e9782aef39 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -26,18 +26,10 @@ #include "nbio_v2_3.h" +extern const struct amdgpu_ip_block_version nv_common_ip_block; + void nv_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); void nv_set_virt_ops(struct amdgpu_device *adev); -int nv_set_ip_blocks(struct amdgpu_device *adev); -int navi10_reg_base_init(struct amdgpu_device *adev); -int navi14_reg_base_init(struct amdgpu_device *adev); -int navi12_reg_base_init(struct amdgpu_device *adev); -int sienna_cichlid_reg_base_init(struct amdgpu_device *adev); -void vangogh_reg_base_init(struct amdgpu_device *adev); -int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev); -int beige_goby_reg_base_init(struct amdgpu_device *adev); -int yellow_carp_reg_base_init(struct amdgpu_device *adev); -int cyan_skillfish_reg_base_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 5872d68ed13d..ed2293686f0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -84,28 +84,28 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; - 
adev->psp.hdcp.feature_version = + adev->psp.hdcp_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp.size_bytes = + adev->psp.hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp.start_addr = + adev->psp.hdcp_context.context.bin_desc.start_addr = (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - adev->psp.dtm.feature_version = + adev->psp.dtm_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm.size_bytes = + adev->psp.dtm_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm.start_addr = - (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->dtm.offset_bytes); - adev->psp.securedisplay.feature_version = + adev->psp.securedisplay_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->securedisplay.fw_version); - adev->psp.securedisplay.size_bytes = + adev->psp.securedisplay_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->securedisplay.size_bytes); - adev->psp.securedisplay.start_addr = - (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.securedisplay_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->securedisplay.offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 29bf9f09944b..2176ef85f137 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -93,35 +93,35 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_VEGA20: + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(11, 0, 2): chip_name = "vega20"; break; - case CHIP_NAVI10: + case IP_VERSION(11, 0, 0): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): chip_name = "navi14"; break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): chip_name = "navi12"; break; - case CHIP_ARCTURUS: + case IP_VERSION(11, 0, 4): chip_name = "arcturus"; break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(11, 0, 7): chip_name = "sienna_cichlid"; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(11, 0, 11): chip_name = "navy_flounder"; break; - case CHIP_VANGOGH: + case IP_VERSION(11, 5, 0): chip_name = "vangogh"; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(11, 0, 12): chip_name = "dimgrey_cavefish"; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 13): chip_name = "beige_goby"; break; default: @@ -129,9 +129,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) } - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 4): err = psp_init_sos_microcode(psp, chip_name); if (err) return err; @@ -151,20 +151,26 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version); - adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes); - adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr + + adev->psp.xgmi_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->xgmi.fw_version); + 
adev->psp.xgmi_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->xgmi.size_bytes); + adev->psp.xgmi_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version); - adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes); - adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr + + adev->psp.ras_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->ras.fw_version); + adev->psp.ras_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->ras.size_bytes); + adev->psp.ras_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->ras.offset_bytes); } break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): err = psp_init_sos_microcode(psp, chip_name); if (err) return err; @@ -186,30 +192,31 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) goto out2; ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + adev->psp.hdcp_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->hdcp.fw_version); + adev->psp.hdcp_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->hdcp.size_bytes); + adev->psp.hdcp_context.context.bin_desc.start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu( + ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.dtm_context.context.bin_desc.fw_version = + le32_to_cpu(ta_hdr->dtm.fw_version); + adev->psp.dtm_context.context.bin_desc.size_bytes = + le32_to_cpu(ta_hdr->dtm.size_bytes); + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context + .bin_desc.start_addr + le32_to_cpu(ta_hdr->dtm.offset_bytes); } break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - err = psp_init_sos_microcode(psp, chip_name); - if (err) - return err; - err = psp_init_ta_microcode(psp, chip_name); - if (err) - return err; - break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): err = psp_init_sos_microcode(psp, chip_name); if (err) return err; @@ -217,7 +224,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) if (err) return err; break; - case CHIP_VANGOGH: + case IP_VERSION(11, 5, 0): err = psp_init_asd_microcode(psp, chip_name); if (err) return err; @@ -691,7 +698,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return -ENOMEM; } - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 
cc649406234b..a2588200ea58 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -84,22 +84,22 @@ static int psp_v12_0_init_microcode(struct psp_context *psp) ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data; - adev->psp.hdcp.feature_version = + adev->psp.hdcp_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->hdcp.fw_version); - adev->psp.hdcp.size_bytes = + adev->psp.hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes); - adev->psp.hdcp.start_addr = + adev->psp.hdcp_context.context.bin_desc.start_addr = (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.dtm.feature_version = + adev->psp.dtm_context.context.bin_desc.fw_version = le32_to_cpu(ta_hdr->dtm.fw_version); - adev->psp.dtm.size_bytes = + adev->psp.dtm_context.context.bin_desc.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes); - adev->psp.dtm.start_addr = - (uint8_t *)adev->psp.hdcp.start_addr + + adev->psp.dtm_context.context.bin_desc.start_addr = + (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr + le32_to_cpu(ta_hdr->dtm.offset_bytes); } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 47a500f64db2..17655bc6d2f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -47,18 +47,19 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) const char *chip_name; int err = 0; - switch (adev->asic_type) { - case CHIP_ALDEBARAN: + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 2): chip_name = "aldebaran"; break; - case CHIP_YELLOW_CARP: + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): chip_name = "yellow_carp"; break; default: BUG(); } - switch (adev->asic_type) { - case CHIP_ALDEBARAN: + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 2): err = psp_init_sos_microcode(psp, chip_name); if (err) return err; @@ -66,7 +67,8 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) if (err) return err; break; - case CHIP_YELLOW_CARP: + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): err = psp_init_asd_microcode(psp, chip_name); if (err) return err; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 8931000dcd41..e8e4749e9c79 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -469,8 +469,8 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id) static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): soc15_program_register_sequence(adev, golden_settings_sdma_4, ARRAY_SIZE(golden_settings_sdma_4)); @@ -478,7 +478,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_vg10, ARRAY_SIZE(golden_settings_sdma_vg10)); break; - case CHIP_VEGA12: + case IP_VERSION(4, 0, 1): soc15_program_register_sequence(adev, golden_settings_sdma_4, ARRAY_SIZE(golden_settings_sdma_4)); @@ -486,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_vg12, ARRAY_SIZE(golden_settings_sdma_vg12)); break; - case CHIP_VEGA20: + case IP_VERSION(4, 2, 0): soc15_program_register_sequence(adev, golden_settings_sdma0_4_2_init, ARRAY_SIZE(golden_settings_sdma0_4_2_init)); @@ 
-497,17 +497,18 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma1_4_2, ARRAY_SIZE(golden_settings_sdma1_4_2)); break; - case CHIP_ARCTURUS: + case IP_VERSION(4, 2, 2): soc15_program_register_sequence(adev, golden_settings_sdma_arct, ARRAY_SIZE(golden_settings_sdma_arct)); break; - case CHIP_ALDEBARAN: + case IP_VERSION(4, 4, 0): soc15_program_register_sequence(adev, golden_settings_sdma_aldebaran, ARRAY_SIZE(golden_settings_sdma_aldebaran)); break; - case CHIP_RAVEN: + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): soc15_program_register_sequence(adev, golden_settings_sdma_4_1, ARRAY_SIZE(golden_settings_sdma_4_1)); @@ -520,7 +521,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_rv1, ARRAY_SIZE(golden_settings_sdma_rv1)); break; - case CHIP_RENOIR: + case IP_VERSION(4, 1, 2): soc15_program_register_sequence(adev, golden_settings_sdma_4_3, ARRAY_SIZE(golden_settings_sdma_4_3)); @@ -538,12 +539,12 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev) * The only chips with SDMAv4 and ULV are VG10 and VG20. * Server SKUs take a different hysteresis setting from other SKUs. */ - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): if (adev->pdev->device == 0x6860) break; return; - case CHIP_VEGA20: + case IP_VERSION(4, 2, 0): if (adev->pdev->device == 0x66a1) break; return; @@ -589,8 +590,8 @@ static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev) /* arcturus shares the same FW memory across all SDMA isntances */ - if (adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || + adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) break; } @@ -620,17 +621,18 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): chip_name = "vega10"; break; - case CHIP_VEGA12: + case IP_VERSION(4, 0, 1): chip_name = "vega12"; break; - case CHIP_VEGA20: + case IP_VERSION(4, 2, 0): chip_name = "vega20"; break; - case CHIP_RAVEN: + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): if (adev->apu_flags & AMD_APU_IS_RAVEN2) chip_name = "raven2"; else if (adev->apu_flags & AMD_APU_IS_PICASSO) @@ -638,16 +640,16 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) else chip_name = "raven"; break; - case CHIP_ARCTURUS: + case IP_VERSION(4, 2, 2): chip_name = "arcturus"; break; - case CHIP_RENOIR: + case IP_VERSION(4, 1, 2): if (adev->apu_flags & AMD_APU_IS_RENOIR) chip_name = "renoir"; else chip_name = "green_sardine"; break; - case CHIP_ALDEBARAN: + case IP_VERSION(4, 4, 0): chip_name = "aldebaran"; break; default: @@ -665,8 +667,8 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) goto out; for (i = 1; i < adev->sdma.num_instances; i++) { - if (adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) { + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) || + adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) { /* Acturus & Aldebaran will leverage the same FW memory for every SDMA instance */ memcpy((void *)&adev->sdma.instance[i], @@ -1106,7 +1108,7 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) * Arcturus for the moment and firmware version 14 * and above. 
*/ - if (adev->asic_type == CHIP_ARCTURUS && + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && adev->sdma.instance[i].fw_version >= 14) WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable); /* Extend page fault timeout to avoid interrupt storm */ @@ -1393,9 +1395,10 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev) if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA)) return; - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): sdma_v4_1_init_power_gating(adev); sdma_v4_1_update_power_gating(adev, true); break; @@ -1835,13 +1838,13 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev) { uint fw_version = adev->sdma.instance[0].fw_version; - switch (adev->asic_type) { - case CHIP_VEGA10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 0, 0): return fw_version >= 430; - case CHIP_VEGA12: + case IP_VERSION(4, 0, 1): /*return fw_version >= 31;*/ return false; - case CHIP_VEGA20: + case IP_VERSION(4, 2, 0): return fw_version >= 123; default: return false; @@ -1853,15 +1856,6 @@ static int sdma_v4_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - if (adev->flags & AMD_IS_APU) - adev->sdma.num_instances = 1; - else if (adev->asic_type == CHIP_ARCTURUS) - adev->sdma.num_instances = 8; - else if (adev->asic_type == CHIP_ALDEBARAN) - adev->sdma.num_instances = 5; - else - adev->sdma.num_instances = 2; - r = sdma_v4_0_init_microcode(adev); if (r) { DRM_ERROR("Failed to load sdma firmware!\n"); @@ -1869,7 +1863,8 @@ static int sdma_v4_0_early_init(void *handle) } /* TODO: Page queue breaks driver reload under SRIOV */ - if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) + if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) && + amdgpu_sriov_vf((adev))) adev->sdma.has_page_queue = false; else if (sdma_v4_0_fw_support_paging_queue(adev)) adev->sdma.has_page_queue = true; @@ -2141,14 +2136,14 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, amdgpu_fence_process(&adev->sdma.instance[instance].ring); break; case 1: - if (adev->asic_type == CHIP_VEGA20) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0)) amdgpu_fence_process(&adev->sdma.instance[instance].page); break; case 2: /* XXX compute */ break; case 3: - if (adev->asic_type != CHIP_VEGA20) + if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0)) amdgpu_fence_process(&adev->sdma.instance[instance].page); break; } @@ -2364,9 +2359,10 @@ static int sdma_v4_0_set_powergating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 1, 0): + case IP_VERSION(4, 1, 1): + case IP_VERSION(4, 1, 2): sdma_v4_1_update_power_gating(adev, state == AMD_PG_STATE_GATE); break; @@ -2551,7 +2547,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->asic_type == CHIP_ARCTURUS && i >= 5) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs_2nd_mmhub; else @@ -2559,7 +2555,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) &sdma_v4_0_ring_funcs; adev->sdma.instance[i].ring.me = i; if (adev->sdma.has_page_queue) { - if (adev->asic_type == 
CHIP_ARCTURUS && i >= 5) + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5) adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs_2nd_mmhub; else @@ -2786,12 +2782,12 @@ static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = { static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 2, 0): + case IP_VERSION(4, 2, 2): adev->sdma.funcs = &sdma_v4_0_ras_funcs; break; - case CHIP_ALDEBARAN: + case IP_VERSION(4, 4, 0): adev->sdma.funcs = &sdma_v4_4_ras_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 50bf3b71bc93..853d1511b889 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -187,8 +187,8 @@ static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3 static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 0, 0): soc15_program_register_sequence(adev, golden_settings_sdma_5, (const u32)ARRAY_SIZE(golden_settings_sdma_5)); @@ -196,7 +196,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_nv10, (const u32)ARRAY_SIZE(golden_settings_sdma_nv10)); break; - case CHIP_NAVI14: + case IP_VERSION(5, 0, 2): soc15_program_register_sequence(adev, golden_settings_sdma_5, (const u32)ARRAY_SIZE(golden_settings_sdma_5)); @@ -204,7 +204,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_nv14, (const u32)ARRAY_SIZE(golden_settings_sdma_nv14)); break; - case CHIP_NAVI12: + case IP_VERSION(5, 0, 5): if (amdgpu_sriov_vf(adev)) soc15_program_register_sequence(adev, golden_settings_sdma_5_sriov, @@ -217,7 +217,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_nv12, (const u32)ARRAY_SIZE(golden_settings_sdma_nv12)); break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(5, 0, 1): soc15_program_register_sequence(adev, golden_settings_sdma_cyan_skillfish, (const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish)); @@ -248,22 +248,22 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev) const struct common_firmware_header *header = NULL; const struct sdma_firmware_header_v1_0 *hdr; - if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_NAVI12)) + if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5))) return 0; DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 0, 0): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(5, 0, 2): chip_name = "navi14"; break; - case CHIP_NAVI12: + case IP_VERSION(5, 0, 5): chip_name = "navi12"; break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(5, 0, 1): if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) chip_name = "cyan_skillfish2"; else @@ -1295,8 +1295,6 @@ static int sdma_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->sdma.num_instances = 2; - sdma_v5_0_set_ring_funcs(adev); sdma_v5_0_set_buffer_funcs(adev); sdma_v5_0_set_vm_pte_funcs(adev); @@ -1636,10 +1634,10 @@ static int sdma_v5_0_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case 
CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 0, 0): + case IP_VERSION(5, 0, 2): + case IP_VERSION(5, 0, 5): sdma_v5_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); sdma_v5_0_update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 779f5c911e11..4d4d1aa51b8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -136,23 +136,23 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 2, 0): chip_name = "sienna_cichlid"; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(5, 2, 2): chip_name = "navy_flounder"; break; - case CHIP_VANGOGH: + case IP_VERSION(5, 2, 1): chip_name = "vangogh"; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(5, 2, 4): chip_name = "dimgrey_cavefish"; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(5, 2, 5): chip_name = "beige_goby"; break; - case CHIP_YELLOW_CARP: + case IP_VERSION(5, 2, 3): chip_name = "yellow_carp"; break; default: @@ -174,7 +174,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) (void *)&adev->sdma.instance[0], sizeof(struct amdgpu_sdma_instance)); - if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID)) + if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0))) return 0; DRM_DEBUG("psp_load == '%s'\n", @@ -375,10 +375,10 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring, */ static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring) { - uint32_t gcr_cntl = - SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV | - SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | - SDMA_GCR_GLI_INV(1); + uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | + SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV | + SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | + SDMA_GCR_GLI_INV(1); /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); @@ -868,6 +868,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) msleep(1000); } + /* TODO: check whether we can submit a doorbell request to raise + * a doorbell fence to exit gfxoff.
+ */ + if (adev->in_s0ix) + amdgpu_gfx_off_ctrl(adev, false); + sdma_v5_2_soft_reset(adev); /* unhalt the MEs */ sdma_v5_2_enable(adev, true); @@ -876,6 +882,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) /* start the gfx rings and rlc compute queues */ r = sdma_v5_2_gfx_resume(adev); + if (adev->in_s0ix) + amdgpu_gfx_off_ctrl(adev, true); if (r) return r; r = sdma_v5_2_rlc_resume(adev); @@ -1209,23 +1217,6 @@ static int sdma_v5_2_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - adev->sdma.num_instances = 4; - break; - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - adev->sdma.num_instances = 2; - break; - case CHIP_VANGOGH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - adev->sdma.num_instances = 1; - break; - default: - break; - } - sdma_v5_2_set_ring_funcs(adev); sdma_v5_2_set_buffer_funcs(adev); sdma_v5_2_set_vm_pte_funcs(adev); @@ -1547,7 +1538,7 @@ static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *ade for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH) + if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1)) adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG; if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { @@ -1584,7 +1575,7 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH) + if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1)) adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS; if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { @@ -1613,13 +1604,13 @@ static int sdma_v5_2_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(5, 2, 0): + case IP_VERSION(5, 2, 2): + case IP_VERSION(5, 2, 1): + case IP_VERSION(5, 2, 4): + case IP_VERSION(5, 2, 5): + case IP_VERSION(5, 2, 3): sdma_v5_2_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); sdma_v5_2_update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c deleted file mode 100644 index 5ee69f70c49b..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
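The thread running through the sdma_v4_0, sdma_v5_0 and sdma_v5_2 hunks above is the replacement of CHIP_* enum checks with adev->ip_versions[SDMA0_HWIP][0] lookups, so one switch covers every ASIC that ships a given SDMA block regardless of marketing name. A minimal standalone sketch of the idea; the 16/8/0 shift layout follows the usual amdgpu convention for IP_VERSION() but is stated here as an assumption, not a quote of the header:

	#include <stdint.h>

	/* Assumed packing: major.minor.revision in one 32-bit value. */
	#define IP_VERSION(mj, mn, rv)  (((uint32_t)(mj) << 16) | ((mn) << 8) | (rv))

	/* Mirrors the sdma_v4_0_fw_support_paging_queue() hunk above. */
	static int sdma_fw_supports_paging(uint32_t sdma_ip, unsigned int fw_version)
	{
		switch (sdma_ip) {
		case IP_VERSION(4, 0, 0):	/* was CHIP_VEGA10 */
			return fw_version >= 430;
		case IP_VERSION(4, 2, 0):	/* was CHIP_VEGA20 */
			return fw_version >= 123;
		default:
			return 0;
		}
	}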
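The sdma_v5_2_start() hunk just above brackets the ring bring-up with a GFXOFF exit and re-enter when resuming from S0ix. A sketch of only the shape of that bracket; sdma_v5_2_gfx_resume() and amdgpu_gfx_off_ctrl() are the driver's own symbols from the diff, while the wrapper name is invented:

	static int sketch_sdma_gfx_resume_guarded(struct amdgpu_device *adev)
	{
		int r;

		/* Leave GFXOFF before touching SDMA registers in S0ix... */
		if (adev->in_s0ix)
			amdgpu_gfx_off_ctrl(adev, false);

		r = sdma_v5_2_gfx_resume(adev);

		/* ...and allow it again regardless of the resume result. */
		if (adev->in_s0ix)
			amdgpu_gfx_off_ctrl(adev, true);

		return r;
	}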
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "sienna_cichlid_ip_offset.h" - -int sienna_cichlid_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the blocke needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0fc97c364fd7..de9b55383e9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -85,6 +85,8 @@ #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 +static const struct amd_ip_funcs soc15_common_ip_funcs; + /* Vega, Raven, Arcturus */ static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] = { @@ -154,31 +156,38 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode = static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_VEGA10: - case CHIP_VEGA12: - if (encode) - *codecs = &vega_video_codecs_encode; - else - *codecs = &vega_video_codecs_decode; - return 0; - case CHIP_RAVEN: - if (encode) - *codecs = &vega_video_codecs_encode; - else - *codecs = &rv_video_codecs_decode; - return 0; - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - case CHIP_RENOIR: - if (encode) - *codecs = &vega_video_codecs_encode; - else - *codecs = &rn_video_codecs_decode; - return 0; - default: - return -EINVAL; + if (adev->ip_versions[VCE_HWIP][0]) { + switch (adev->ip_versions[VCE_HWIP][0]) { + case IP_VERSION(4, 0, 0): + case IP_VERSION(4, 1, 0): + if (encode) + *codecs = &vega_video_codecs_encode; + else + *codecs = 
&vega_video_codecs_decode; + return 0; + default: + return -EINVAL; + } + } else { + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + if (encode) + *codecs = &vega_video_codecs_encode; + else + *codecs = &rv_video_codecs_decode; + return 0; + case IP_VERSION(2, 5, 0): + case IP_VERSION(2, 6, 0): + case IP_VERSION(2, 2, 0): + if (encode) + *codecs = &vega_video_codecs_encode; + else + *codecs = &rn_video_codecs_decode; + return 0; + default: + return -EINVAL; + } } } @@ -332,9 +341,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) { u32 reference_clock = adev->clock.spll.reference_freq; - if (adev->asic_type == CHIP_RENOIR) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) return 10000; - if (adev->asic_type == CHIP_RAVEN) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1)) return reference_clock / 4; return reference_clock; @@ -565,28 +576,29 @@ soc15_asic_reset_method(struct amdgpu_device *adev) dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", amdgpu_reset_method); - switch (adev->asic_type) { - case CHIP_RAVEN: - case CHIP_RENOIR: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(10, 0, 0): + case IP_VERSION(10, 0, 1): + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): return AMD_RESET_METHOD_MODE2; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_ARCTURUS: - baco_reset = amdgpu_dpm_is_baco_supported(adev); - break; - case CHIP_VEGA20: - if (adev->psp.sos.fw_version >= 0x80067) + case IP_VERSION(9, 0, 0): + case IP_VERSION(11, 0, 2): + if (adev->asic_type == CHIP_VEGA20) { + if (adev->psp.sos.fw_version >= 0x80067) + baco_reset = amdgpu_dpm_is_baco_supported(adev); + /* + * 1. PMFW version > 0x284300: all cases use baco + * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco + */ + if (ras && adev->ras_enabled && + adev->pm.fw_version <= 0x283400) + baco_reset = false; + } else { baco_reset = amdgpu_dpm_is_baco_supported(adev); - - /* - * 1. PMFW version > 0x284300: all cases use baco - * 2. 
PMFW version <= 0x284300: only sGPU w/o RAS use baco - */ - if (ras && adev->ras_enabled && - adev->pm.fw_version <= 0x283400) - baco_reset = false; + } break; - case CHIP_ALDEBARAN: + case IP_VERSION(13, 0, 2): /* * 1.connected to cpu: driver issue mode2 reset * 2.discret gpu: driver issue mode1 reset @@ -629,15 +641,17 @@ static int soc15_asic_reset(struct amdgpu_device *adev) static bool soc15_supports_baco(struct amdgpu_device *adev) { - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_ARCTURUS: - return amdgpu_dpm_is_baco_supported(adev); - case CHIP_VEGA20: - if (adev->psp.sos.fw_version >= 0x80067) + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(11, 0, 2): + if (adev->asic_type == CHIP_VEGA20) { + if (adev->psp.sos.fw_version >= 0x80067) + return amdgpu_dpm_is_baco_supported(adev); + return false; + } else { return amdgpu_dpm_is_baco_supported(adev); - return false; + } + break; default: return false; } @@ -704,7 +718,7 @@ static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } -static const struct amdgpu_ip_block_version vega10_common_ip_block = +const struct amdgpu_ip_block_version vega10_common_ip_block = { .type = AMD_IP_BLOCK_TYPE_COMMON, .major = 2, @@ -766,185 +780,6 @@ void soc15_set_virt_ops(struct amdgpu_device *adev) soc15_reg_base_init(adev); } -int soc15_set_ip_blocks(struct amdgpu_device *adev) -{ - /* for bare metal case */ - if (!amdgpu_sriov_vf(adev)) - soc15_reg_base_init(adev); - - if (adev->flags & AMD_IS_APU) { - adev->nbio.funcs = &nbio_v7_0_funcs; - adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; - } else if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) { - adev->nbio.funcs = &nbio_v7_4_funcs; - adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; - } else { - adev->nbio.funcs = &nbio_v6_1_funcs; - adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; - } - adev->hdp.funcs = &hdp_v4_0_funcs; - - if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) - adev->df.funcs = &df_v3_6_funcs; - else - adev->df.funcs = &df_v1_7_funcs; - - if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) - adev->smuio.funcs = &smuio_v11_0_funcs; - else if (adev->asic_type == CHIP_ALDEBARAN) - adev->smuio.funcs = &smuio_v13_0_funcs; - else - adev->smuio.funcs = &smuio_v9_0_funcs; - - adev->rev_id = soc15_get_rev_id(adev); - - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - - /* For Vega10 SR-IOV, PSP need to be initialized before IH */ - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - else - amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - } - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - else - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - } else { - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - else - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { - if (adev->asic_type == CHIP_VEGA20) - 
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - else - amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - } - } - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (is_support_sw_smu(adev)) { - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - } - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) { - amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); - amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); - } - break; - case CHIP_RAVEN: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); - break; - case CHIP_ARCTURUS: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - } - - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); - } - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); - break; - case CHIP_RENOIR: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); -#if defined(CONFIG_DRM_AMD_DC) - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, 
&dm_ip_block); -#endif - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); - break; - case CHIP_ALDEBARAN: - amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - - if (amdgpu_sriov_vf(adev)) { - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - } else { - amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); - } - - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - - amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block); - amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block); - break; - default: - return -EINVAL; - } - - return 0; -} - static bool soc15_need_full_reset(struct amdgpu_device *adev) { /* change this when we implement soft reset */ @@ -1136,8 +971,10 @@ static int soc15_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &soc15_pcie_rreg; @@ -1153,10 +990,13 @@ static int soc15_common_early_init(void *handle) adev->se_cac_rreg = &soc15_se_cac_rreg; adev->se_cac_wreg = &soc15_se_cac_wreg; - + adev->rev_id = soc15_get_rev_id(adev); adev->external_rev_id = 0xFF; - switch (adev->asic_type) { - case CHIP_VEGA10: + /* TODO: split the GC and PG flags based on the relevant IP version for which + * they are relevant. 
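soc15_set_ip_blocks(), deleted in full above, hand-registered every IP block per ASIC. With the move to IP-version checks that role is assumed to shift to the IP-discovery path introduced in this cycle; a caller-side sketch, with the discovery entry point's name inferred rather than verified:

	/* Assumed caller-side shape after this series. */
	static int sketch_set_ip_blocks(struct amdgpu_device *adev)
	{
		/* Reads the VBIOS/firmware IP tables, fills adev->ip_versions[][]
		 * and registers each block via amdgpu_device_ip_block_add(),
		 * replacing the hand-written per-ASIC lists deleted above. */
		return amdgpu_discovery_set_ip_blocks(adev);
	}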
+ */ + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): adev->asic_funcs = &soc15_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1180,7 +1020,7 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = 0x1; break; - case CHIP_VEGA12: + case IP_VERSION(9, 2, 1): adev->asic_funcs = &soc15_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1203,7 +1043,7 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x14; break; - case CHIP_VEGA20: + case IP_VERSION(9, 4, 0): adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1226,7 +1066,8 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x28; break; - case CHIP_RAVEN: + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): adev->asic_funcs = &soc15_asic_funcs; if (adev->rev_id >= 0x8) @@ -1299,7 +1140,7 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; } break; - case CHIP_ARCTURUS: + case IP_VERSION(9, 4, 1): adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1318,7 +1159,7 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x32; break; - case CHIP_RENOIR: + case IP_VERSION(9, 3, 0): adev->asic_funcs = &soc15_asic_funcs; if (adev->apu_flags & AMD_APU_IS_RENOIR) @@ -1349,7 +1190,7 @@ static int soc15_common_early_init(void *handle) AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_VCN_DPG; break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1446,7 +1287,7 @@ static int soc15_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ @@ -1564,10 +1405,10 @@ static int soc15_common_set_clockgating_state(void *handle, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(6, 1, 0): + case IP_VERSION(6, 2, 0): + case IP_VERSION(7, 4, 0): adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, @@ -1583,8 +1424,9 @@ static int soc15_common_set_clockgating_state(void *handle, adev->df.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); break; - case CHIP_RAVEN: - case CHIP_RENOIR: + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 0, 1): + case IP_VERSION(2, 5, 0): adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, @@ -1596,8 +1438,8 @@ static int soc15_common_set_clockgating_state(void *handle, soc15_update_drm_light_sleep(adev, state == AMD_CG_STATE_GATE); break; - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: + case IP_VERSION(7, 4, 1): + case IP_VERSION(7, 4, 4): adev->hdp.funcs->update_clock_gating(adev, state == AMD_CG_STATE_GATE); break; @@ -1619,7 +1461,7 @@ static void 
soc15_common_get_clockgating_state(void *handle, u32 *flags) adev->hdp.funcs->get_clock_gating_state(adev, flags); - if (adev->asic_type != CHIP_ALDEBARAN) { + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) { /* AMD_CG_SUPPORT_DRM_MGCG */ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); @@ -1645,7 +1487,7 @@ static int soc15_common_set_powergating_state(void *handle, return 0; } -const struct amd_ip_funcs soc15_common_ip_funcs = { +static const struct amd_ip_funcs soc15_common_ip_funcs = { .name = "soc15_common", .early_init = soc15_common_early_init, .late_init = soc15_common_late_init, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 034cfdfc4dbe..efc2a253e8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -28,11 +28,11 @@ #include "nbio_v7_0.h" #include "nbio_v7_4.h" +extern const struct amdgpu_ip_block_version vega10_common_ip_block; + #define SOC15_FLUSH_GPU_TLB_NUM_WREG 6 #define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3 -extern const struct amd_ip_funcs soc15_common_ip_funcs; - struct soc15_reg_golden { u32 hwip; u32 instance; @@ -102,7 +102,6 @@ struct soc15_ras_field_entry { void soc15_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); void soc15_set_virt_ops(struct amdgpu_device *adev); -int soc15_set_ip_blocks(struct amdgpu_device *adev); void soc15_program_register_sequence(struct amdgpu_device *adev, const struct soc15_reg_golden *registers, diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 0f214a398dd8..5093826a43d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -38,9 +38,8 @@ enum ras_command { TA_RAS_COMMAND__TRIGGER_ERROR, }; -enum ta_ras_status -{ - TA_RAS_STATUS__SUCCESS = 0x00, +enum ta_ras_status { + TA_RAS_STATUS__SUCCESS = 0x0000, TA_RAS_STATUS__RESET_NEEDED = 0xA001, TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002, TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003, @@ -55,7 +54,17 @@ enum ta_ras_status TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C, TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D, TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E, - TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F + TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F, + TA_RAS_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010, + TA_RAS_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011, + TA_RAS_STATUS__ERROR_RAS_READ_WRITE = 0xA012, + TA_RAS_STATUS__ERROR_NULL_PTR = 0xA013, + TA_RAS_STATUS__ERROR_UNSUPPORTED_IP = 0xA014, + TA_RAS_STATUS__ERROR_PCS_STATE_QUIET = 0xA015, + TA_RAS_STATUS__ERROR_PCS_STATE_ERROR = 0xA016, + TA_RAS_STATUS__ERROR_PCS_STATE_HANG = 0xA017, + TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018, + TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019 }; enum ta_ras_block { @@ -73,9 +82,18 @@ enum ta_ras_block { TA_RAS_BLOCK__MP0, TA_RAS_BLOCK__MP1, TA_RAS_BLOCK__FUSE, + TA_RAS_BLOCK__MCA, TA_NUM_BLOCK_MAX }; +enum ta_ras_mca_block { + TA_RAS_MCA_BLOCK__MP0 = 0, + TA_RAS_MCA_BLOCK__MP1 = 1, + TA_RAS_MCA_BLOCK__MPIO = 2, + TA_RAS_MCA_BLOCK__IOHC = 3, + TA_MCA_NUM_BLOCK_MAX +}; + enum ta_ras_error_type { TA_RAS_ERROR__NONE = 0, TA_RAS_ERROR__PARITY = 1, @@ -105,17 +123,15 @@ struct ta_ras_trigger_error_input { uint64_t value; // method if error injection. i.e persistent, coherent etc. 
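The ta_ras_if.h hunk in progress here roughly doubles the TA status space (PCS state reporting, NULL-pointer and register-access failures, unsupported error injection). A small illustrative decoder for logging a few of the new codes; the selection is arbitrary and the function is not part of the interface:

	static const char *sketch_ta_ras_status_str(enum ta_ras_status status)
	{
		switch (status) {
		case TA_RAS_STATUS__SUCCESS:
			return "success";
		case TA_RAS_STATUS__ERROR_RAS_READ_WRITE:
			return "RAS read/write failed";
		case TA_RAS_STATUS__ERROR_PCS_STATE_HANG:
			return "PCS hang";
		case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
			return "unsupported error injection";
		default:
			return "unrecognized TA status";
		}
	}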
}; -struct ta_ras_init_flags -{ - uint8_t poison_mode_en; - uint8_t dgpu_mode; +struct ta_ras_init_flags { + uint8_t poison_mode_en; + uint8_t dgpu_mode; }; -struct ta_ras_output_flags -{ - uint8_t ras_init_success_flag; - uint8_t err_inject_switch_disable_flag; - uint8_t reg_access_failure_flag; +struct ta_ras_output_flags { + uint8_t ras_init_success_flag; + uint8_t err_inject_switch_disable_flag; + uint8_t reg_access_failure_flag; }; /* Common input structure for RAS callbacks */ @@ -126,14 +142,13 @@ union ta_ras_cmd_input { struct ta_ras_disable_features_input disable_features; struct ta_ras_trigger_error_input trigger_error; - uint32_t reserve_pad[256]; + uint32_t reserve_pad[256]; }; -union ta_ras_cmd_output -{ - struct ta_ras_output_flags flags; +union ta_ras_cmd_output { + struct ta_ras_output_flags flags; - uint32_t reserve_pad[256]; + uint32_t reserve_pad[256]; }; /* Shared Memory structures */ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index bb30336b1e8d..6dd1e19e8d43 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -50,6 +50,165 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev, return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst; } +static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; +} + +static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint32_t ecc_err_cnt; + uint64_t mc_umc_status; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + /* + * select the lower chip and check the error count + * skip add error count, calc error counter only from mca_umc_status + */ + ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip; + + /* + * select the higher chip and check the err counter + * skip add error count, calc error counter only from mca_umc_status + */ + ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip; + + /* check for SRAM correctable error + MCUMC_STATUS is a 64 bit register */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; +} + +static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + /* check the MCUMC_STATUS */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + *error_count += 1; +} + +static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t 
umc_reg_offset = 0; + uint32_t channel_index = 0; + + /*TODO: driver needs to toggle DF Cstate to ensure + * safe access of UMC registers. Will add the protection */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + channel_index = get_umc_v6_7_channel_index(adev, + umc_inst, + ch_inst); + umc_v6_7_ecc_info_query_correctable_error_count(adev, + channel_index, + &(err_data->ce_count)); + umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev, + channel_index, + &(err_data->ue_count)); + } +} + +static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, + struct ras_err_data *err_data, + uint32_t umc_reg_offset, + uint32_t ch_inst, + uint32_t umc_inst) +{ + uint64_t mc_umc_status, err_addr, retired_page; + struct eeprom_table_record *err_rec; + uint32_t channel_index; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + channel_index = + adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; + + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + + if (mc_umc_status == 0) + return; + + if (!err_data->err_addr) + return; + + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; + + /* calculate error address if ue/ce error is detected */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { + + err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr; + err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + + /* translate umc channel address to soc pa, 3 parts are included */ + retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); + + /* we only save ue error information currently, ce is skipped */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) + == 1) { + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_inst; + + err_data->err_addr_cnt++; + } + } +} + +static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + /*TODO: driver needs to toggle DF Cstate to ensure + * safe access of UMC registers.
Will add the protection + * when firmware interface is ready */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + umc_v6_7_ecc_info_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } +} + static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev, uint32_t umc_reg_offset, unsigned long *error_count) @@ -288,9 +447,45 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev, } } +static uint32_t umc_v6_7_query_ras_poison_mode_per_channel( + struct amdgpu_device *adev, + uint32_t umc_reg_offset) +{ + uint32_t ecc_ctrl_addr, ecc_ctrl; + + ecc_ctrl_addr = + SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccCtrl); + ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr + + umc_reg_offset) * 4); + + return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_EccCtrl, UCFatalEn); +} + +static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev) +{ + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + /* Enabling fatal error in one channel will be considered + as fatal error mode */ + if (umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset)) + return false; + } + + return true; +} + const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = { .ras_late_init = amdgpu_umc_ras_late_init, .ras_fini = amdgpu_umc_ras_fini, .query_ras_error_count = umc_v6_7_query_ras_error_count, .query_ras_error_address = umc_v6_7_query_ras_error_address, + .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode, + .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count, + .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address, }; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 7232241e3bfb..0fef925b6602 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -698,6 +698,19 @@ static int uvd_v3_1_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v3_1_stop(adev); + + return 0; +} + +static int uvd_v3_1_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -722,17 +735,6 @@ static int uvd_v3_1_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v3_1_stop(adev); - - return 0; -} - -static int uvd_v3_1_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v3_1_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 52d6de969f46..c108b8381795 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -212,6 +212,19 @@ static int uvd_v4_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v4_2_stop(adev); + + return 0; +} + +static int uvd_v4_2_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -236,17 +249,6 @@ static int 
uvd_v4_2_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v4_2_stop(adev); - - return 0; -} - -static int uvd_v4_2_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v4_2_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index db6d06758e4d..563493d1f830 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -210,6 +210,19 @@ static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v5_0_stop(adev); + + return 0; +} + +static int uvd_v5_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -234,17 +247,6 @@ static int uvd_v5_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v5_0_stop(adev); - - return 0; -} - -static int uvd_v5_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v5_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index bc571833632e..2d558c2f417d 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -332,15 +332,9 @@ err: static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL); if (r) goto error; @@ -357,9 +351,6 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unpin(bo); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -543,6 +534,19 @@ static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (RREG32(mmUVD_STATUS) != 0) + uvd_v6_0_stop(adev); + + return 0; +} + +static int uvd_v6_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -567,17 +571,6 @@ static int uvd_v6_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (RREG32(mmUVD_STATUS) != 0) - uvd_v6_0_stop(adev); - - return 0; -} - -static int uvd_v6_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v6_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index b6e82d75561f..b483f03b4591 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -338,15 +338,9 @@ err: static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence = NULL; - struct amdgpu_bo *bo = NULL; + struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; long r; - r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &bo, NULL, NULL); - if (r) - return r; - r = uvd_v7_0_enc_get_create_msg(ring, 
1, bo, NULL); if (r) goto error; @@ -363,9 +357,6 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); - amdgpu_bo_unpin(bo); - amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); return r; } @@ -606,6 +597,23 @@ static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->uvd.idle_work); + + if (!amdgpu_sriov_vf(adev)) + uvd_v7_0_stop(adev); + else { + /* full access mode, so don't touch any UVD register */ + DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); + } + + return 0; +} + +static int uvd_v7_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -630,21 +638,6 @@ static int uvd_v7_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - if (!amdgpu_sriov_vf(adev)) - uvd_v7_0_stop(adev); - else { - /* full access mode, so don't touch any UVD register */ - DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); - } - - return 0; -} - -static int uvd_v7_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = uvd_v7_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c deleted file mode 100644 index d64d681a05dc..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
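The uvd_v6_0 and uvd_v7_0 enc_ring_test_ib() hunks above drop the per-test buffer create/unref cycle in favour of a BO that now lives at adev->uvd.ib_bo. A sketch of the one-time allocation implied by the deleted call; the init-site placement is assumed, since this diff does not show it, and teardown presumably moves to sw_fini:

	/* At UVD sw_init time (assumed location), reserve the IB buffer once: */
	r = amdgpu_bo_create_reserved(adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->uvd.ib_bo, NULL, NULL);
	if (r)
		return r;

	/* Ring tests then reuse it, as in the hunks above:
	 *     struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	 * with no unpin/unreserve/unref on the test path. */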
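Returning to the umc_v6_7.c additions further up: the new ecc_info_* callbacks count errors from the RAS context's cached mca_umc_status words instead of reading UMC registers directly. A hedged sketch of how a caller might consume them; the callback and field names come from the diff, the surrounding code is invented:

	struct ras_err_data err_data = {};

	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->ecc_info_query_ras_error_count)
		adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, &err_data);

	dev_info(adev->dev, "UMC ECC: %lu correctable, %lu uncorrectable\n",
		 err_data.ce_count, err_data.ue_count);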
- * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "vangogh_ip_offset.h" - -void vangogh_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the blocke needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - } -} diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index b70c17f0c52e..67eb01fef789 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -431,10 +431,12 @@ static int vce_v2_0_sw_init(void *handle) return r; for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -479,6 +481,17 @@ static int vce_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vce.idle_work); + + return 0; +} + +static int vce_v2_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -502,14 +515,6 @@ static int vce_v2_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - return 0; -} - -static int vce_v2_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vce_v2_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 9de66893ccd6..142e291983b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -440,10 +440,12 @@ static int vce_v3_0_sw_init(void *handle) return r; for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -490,6 +492,21 @@ static int vce_v3_0_hw_fini(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vce.idle_work); + + r = vce_v3_0_wait_for_idle(handle); + if (r) + return r; + + vce_v3_0_stop(adev); + return vce_v3_0_set_clockgating_state(adev, 
AMD_CG_STATE_GATE); +} + +static int vce_v3_0_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* * Proper cleanups before halting the HW engine: * - cancel the delayed idle work @@ -513,19 +530,6 @@ static int vce_v3_0_hw_fini(void *handle) AMD_CG_STATE_GATE); } - r = vce_v3_0_wait_for_idle(handle); - if (r) - return r; - - vce_v3_0_stop(adev); - return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); -} - -static int vce_v3_0_suspend(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vce_v3_0_hw_fini(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index fec902b800c2..d1fc4e0b8265 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -463,6 +463,8 @@ static int vce_v4_0_sw_init(void *handle) } for (i = 0; i < adev->vce.num_rings; i++) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i); + ring = &adev->vce.ring[i]; sprintf(ring->name, "vce%d", i); if (amdgpu_sriov_vf(adev)) { @@ -478,7 +480,7 @@ static int vce_v4_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1; } r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -542,29 +544,8 @@ static int vce_v4_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* - * Proper cleanups before halting the HW engine: - * - cancel the delayed idle work - * - enable powergating - * - enable clockgating - * - disable dpm - * - * TODO: to align with the VCN implementation, move the - * jobs for clockgating/powergating/dpm setting to - * ->set_powergating_state(). - */ cancel_delayed_work_sync(&adev->vce.idle_work); - if (adev->pm.dpm_enabled) { - amdgpu_dpm_enable_vce(adev, false); - } else { - amdgpu_asic_set_vce_clocks(adev, 0, 0); - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - } - if (!amdgpu_sriov_vf(adev)) { /* vce_v4_0_wait_for_idle(handle); */ vce_v4_0_stop(adev); @@ -584,7 +565,7 @@ static int vce_v4_0_suspend(void *handle) if (adev->vce.vcpu_bo == NULL) return 0; - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); void *ptr = adev->vce.cpu_addr; @@ -594,6 +575,29 @@ static int vce_v4_0_suspend(void *handle) drm_dev_exit(idx); } + /* + * Proper cleanups before halting the HW engine: + * - cancel the delayed idle work + * - enable powergating + * - enable clockgating + * - disable dpm + * + * TODO: to align with the VCN implementation, move the + * jobs for clockgating/powergating/dpm setting to + * ->set_powergating_state(). 
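The vce_v2_0 and vce_v3_0 sw_init hunks above stop hard-coding AMDGPU_RING_PRIO_DEFAULT and ask amdgpu_vce_get_ring_prio() for a per-ring level. Only the helper's name appears in this diff; a plausible shape for such a mapping, offered purely as an assumption:

	static enum amdgpu_ring_priority_level sketch_vce_get_ring_prio(int ring)
	{
		switch (ring) {
		case 0:
			return AMDGPU_RING_PRIO_0;	/* default work */
		case 1:
		case 2:
			return AMDGPU_RING_PRIO_1;	/* elevated queues */
		default:
			return AMDGPU_RING_PRIO_0;
		}
	}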
+ */ + cancel_delayed_work_sync(&adev->vce.idle_work); + + if (adev->pm.dpm_enabled) { + amdgpu_dpm_enable_vce(adev, false); + } else { + amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + } + r = vce_v4_0_hw_fini(adev); if (r) return r; @@ -611,7 +615,7 @@ static int vce_v4_0_resume(void *handle) if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); void *ptr = adev->vce.cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 121ee9f2b8d1..d54d720b3cf6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -66,7 +66,6 @@ static int vcn_v1_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->vcn.num_vcn_inst = 1; adev->vcn.num_enc_rings = 2; vcn_v1_0_set_dec_ring_funcs(adev); @@ -112,15 +111,7 @@ static int vcn_v1_0_sw_init(void *handle) /* Override the work func */ adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -145,10 +136,12 @@ static int vcn_v1_0_sw_init(void *handle) SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst->ring_enc[i]; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index f4686e918e0d..313fc1b53999 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -22,6 +22,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vcn.h" @@ -68,7 +69,6 @@ static int vcn_v2_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->vcn.num_vcn_inst = 1; if (amdgpu_sriov_vf(adev)) adev->vcn.num_enc_rings = 1; else @@ -115,15 +115,7 @@ static int vcn_v2_0_sw_init(void *handle) if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -159,6 +151,8 @@ static int vcn_v2_0_sw_init(void *handle) adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 
0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; if (!amdgpu_sriov_vf(adev)) @@ -167,7 +161,7 @@ static int vcn_v2_0_sw_init(void *handle) ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -192,11 +186,14 @@ static int vcn_v2_0_sw_init(void *handle) */ static int vcn_v2_0_sw_fini(void *handle) { - int r; + int r, idx; struct amdgpu_device *adev = (struct amdgpu_device *)handle; volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr; - fw_shared->present_flag_0 = 0; + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + fw_shared->present_flag_0 = 0; + drm_dev_exit(idx); + } amdgpu_virt_free_mm_table(adev); @@ -1879,15 +1876,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev) /* mc resume*/ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - tmp = AMDGPU_UCODE_ID_VCN; MMSCH_V2_0_INSERT_DIRECT_WT( SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - adev->firmware.ucode[tmp].tmr_mc_addr_lo); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo); MMSCH_V2_0_INSERT_DIRECT_WT( SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - adev->firmware.ucode[tmp].tmr_mc_addr_hi); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi); offset = 0; } else { MMSCH_V2_0_INSERT_DIRECT_WT( diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index e0c0c3734432..44fc4c218433 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -22,6 +22,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vcn.h" @@ -82,7 +83,7 @@ static int vcn_v2_5_early_init(void *handle) } else { u32 harvest; int i; - adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING); if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) @@ -138,22 +139,7 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - - if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) { - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - } - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -194,6 +180,8 @@ static int vcn_v2_5_sw_init(void *handle) return r; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i); + ring = &adev->vcn.inst[j].ring_enc[i]; ring->use_doorbell = true; @@ -203,7 +191,7 @@ static int vcn_v2_5_sw_init(void *handle) sprintf(ring->name, "vcn_enc_%d.%d", j, i); r = amdgpu_ring_init(adev, ring, 512, 
&adev->vcn.inst[j].irq, 0, - AMDGPU_RING_PRIO_DEFAULT, NULL); + hw_prio, NULL); if (r) return r; } @@ -233,17 +221,21 @@ static int vcn_v2_5_sw_init(void *handle) */ static int vcn_v2_5_sw_fini(void *handle) { - int i, r; + int i, r, idx; struct amdgpu_device *adev = (struct amdgpu_device *)handle; volatile struct amdgpu_fw_shared *fw_shared; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - if (adev->vcn.harvest_config & (1 << i)) - continue; - fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; - fw_shared->present_flag_0 = 0; + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr; + fw_shared->present_flag_0 = 0; + } + drm_dev_exit(idx); } + if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); @@ -1713,7 +1705,7 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - if (adev->asic_type == CHIP_ARCTURUS) + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; else /* CHIP_ALDEBARAN */ adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs; @@ -1730,7 +1722,7 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) if (adev->vcn.harvest_config & (1 << j)) continue; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - if (adev->asic_type == CHIP_ARCTURUS) + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0)) adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; else /* CHIP_ALDEBARAN */ adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3d18aab88b4e..da11ceba0698 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -60,11 +60,6 @@ static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN1 }; -static int amdgpu_ucode_id_vcns[] = { - AMDGPU_UCODE_ID_VCN, - AMDGPU_UCODE_ID_VCN1 -}; - static int vcn_v3_0_start_sriov(struct amdgpu_device *adev); static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev); @@ -87,7 +82,6 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); static int vcn_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i; if (amdgpu_sriov_vf(adev)) { adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; @@ -95,24 +89,12 @@ static int vcn_v3_0_early_init(void *handle) adev->vcn.num_enc_rings = 1; } else { - if (adev->asic_type == CHIP_SIENNA_CICHLID) { - u32 harvest; - - adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { - harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING); - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) - adev->vcn.harvest_config |= 1 << i; - } - - if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | - AMDGPU_VCN_HARVEST_VCN1)) - /* both instances are harvested, disable the block */ - return -ENOENT; - } else - adev->vcn.num_vcn_inst = 1; + if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | + AMDGPU_VCN_HARVEST_VCN1)) + /* both instances are harvested, disable the block */ + return -ENOENT; - if (adev->asic_type == CHIP_BEIGE_GOBY) + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33)) adev->vcn.num_enc_rings = 
0; else adev->vcn.num_enc_rings = 2; @@ -143,22 +125,7 @@ static int vcn_v3_0_sw_init(void *handle) if (r) return r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - const struct common_firmware_header *hdr; - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - - if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) { - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1; - adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - } - dev_info(adev->dev, "Will use PSP to load VCN firmware\n"); - } + amdgpu_vcn_setup_ucode(adev); r = amdgpu_vcn_resume(adev); if (r) @@ -224,6 +191,8 @@ static int vcn_v3_0_sw_init(void *handle) return r; for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j); + /* VCN ENC TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); @@ -239,8 +208,7 @@ static int vcn_v3_0_sw_init(void *handle) } sprintf(ring->name, "vcn_enc_%d.%d", i, j); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, - AMDGPU_RING_PRIO_DEFAULT, - &adev->vcn.inst[i].sched_score); + hw_prio, &adev->vcn.inst[i].sched_score); if (r) return r; } @@ -275,7 +243,7 @@ static int vcn_v3_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r, idx; - if (drm_dev_enter(&adev->ddev, &idx)) { + if (drm_dev_enter(adev_to_drm(adev), &idx)) { for (i = 0; i < adev->vcn.num_vcn_inst; i++) { volatile struct amdgpu_fw_shared *fw_shared; @@ -1271,7 +1239,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev) fw_shared->rb.wptr = lower_32_bits(ring->wptr); fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); - if (adev->asic_type != CHIP_BEIGE_GOBY) { + if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); ring = &adev->vcn.inst[i].ring_enc[0]; WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); @@ -1305,7 +1273,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) uint32_t param, resp, expected; uint32_t offset, cache_size; uint32_t tmp, timeout; - uint32_t id; struct amdgpu_mm_table *table = &adev->virt.mm_table; uint32_t *table_loc; @@ -1349,13 +1316,12 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - id = amdgpu_ucode_id_vcns[i]; MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - adev->firmware.ucode[id].tmr_mc_addr_lo); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - adev->firmware.ucode[id].tmr_mc_addr_hi); + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); offset = 0; MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), @@ -1643,7 +1609,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, 
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); - if (adev->asic_type != CHIP_BEIGE_GOBY) { + if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) { /* Restore */ fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr; fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); diff --git a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c b/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c deleted file mode 100644 index 3d89421275ed..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#include "amdgpu.h" -#include "nv.h" - -#include "soc15_common.h" -#include "soc15_hw_ip.h" -#include "yellow_carp_offset.h" - -int yellow_carp_reg_base_init(struct amdgpu_device *adev) -{ - /* HW has more IP blocks, only initialized the block needed by driver */ - uint32_t i; - for (i = 0 ; i < MAX_INSTANCE ; ++i) { - adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); - adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); - adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); - adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); - adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); - adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); - adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); - adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); - adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); - adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); - adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); - adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i])); - adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); - adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); - } - return 0; -} diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index f6233019f042..d60576ce10cd 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -43,15 +43,15 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, */ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT || ihre->source_id == 
CIK_INTSRC_GFX_MEM_PROT_FAULT) && - dev->device_info->asic_family == CHIP_HAWAII) { + dev->adev->asic_type == CHIP_HAWAII) { struct cik_ih_ring_entry *tmp_ihre = (struct cik_ih_ring_entry *)patched_ihre; *patched_flag = true; *tmp_ihre = *ihre; - vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd); - ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid); + vmid = f2g->read_vmid_from_vmfault_reg(dev->adev); + ret = f2g->get_atc_vmid_pasid_mapping_info(dev->adev, vmid, &pasid); tmp_ihre->ring_id &= 0x000000ff; tmp_ihre->ring_id |= vmid << 8; @@ -113,7 +113,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev, kfd_process_vm_fault(dev->dqm, pasid); memset(&info, 0, sizeof(info)); - amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info); + amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info); if (!info.page_addr && !info.status) return; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 86afd37b098d..4bfc0c8ab764 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -321,7 +321,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Return gpu_id as doorbell offset for mmap usage */ args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); - if (KFD_IS_SOC15(dev->device_info->asic_family)) + if (KFD_IS_SOC15(dev)) /* On SOC15 ASICs, include the doorbell offset within the * process doorbell frame, which is 2 pages. */ @@ -405,7 +405,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, mutex_lock(&p->mutex); - retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); + retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties); mutex_unlock(&p->mutex); @@ -418,7 +418,7 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, int retval; const int max_num_cus = 1024; struct kfd_ioctl_set_cu_mask_args *args = data; - struct queue_properties properties; + struct mqd_update_info minfo = {0}; uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr; size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32); @@ -428,8 +428,8 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, return -EINVAL; } - properties.cu_mask_count = args->num_cu_mask; - if (properties.cu_mask_count == 0) { + minfo.cu_mask.count = args->num_cu_mask; + if (minfo.cu_mask.count == 0) { pr_debug("CU mask cannot be 0"); return -EINVAL; } @@ -438,32 +438,33 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, * limit of max_num_cus bits. We can then just drop any CU mask bits * past max_num_cus bits and just use the first max_num_cus bits. 
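 * (Each 32-bit word of the mask covers 32 CUs, so this 1024-bit cap
 * amounts to copying at most 32 words from user space; cu_mask_size
 * is clamped below to match.)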
*/ - if (properties.cu_mask_count > max_num_cus) { + if (minfo.cu_mask.count > max_num_cus) { pr_debug("CU mask cannot be greater than 1024 bits"); - properties.cu_mask_count = max_num_cus; + minfo.cu_mask.count = max_num_cus; cu_mask_size = sizeof(uint32_t) * (max_num_cus/32); } - properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL); - if (!properties.cu_mask) + minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL); + if (!minfo.cu_mask.ptr) return -ENOMEM; - retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size); + retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size); if (retval) { pr_debug("Could not copy CU mask from userspace"); - kfree(properties.cu_mask); - return -EFAULT; + retval = -EFAULT; + goto out; } + minfo.update_flag = UPDATE_FLAG_CU_MASK; + mutex_lock(&p->mutex); - retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties); + retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo); mutex_unlock(&p->mutex); - if (retval) - kfree(properties.cu_mask); - +out: + kfree(minfo.cu_mask.ptr); return retval; } @@ -579,7 +580,7 @@ static int kfd_ioctl_dbg_register(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_register not supported on CZ\n"); return -EINVAL; } @@ -630,7 +631,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep, if (!dev || !dev->dbgmgr) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n"); return -EINVAL; } @@ -675,7 +676,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); return -EINVAL; } @@ -783,7 +784,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); return -EINVAL; } @@ -850,7 +851,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, dev = kfd_device_by_id(args->gpu_id); if (dev) /* Reading GPU clock counter from KGD */ - args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd); + args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->adev); else /* Node without GPU resource */ args->gpu_clock_counter = 0; @@ -1011,11 +1012,6 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, void *mem, *kern_addr; uint64_t size; - if (p->signal_page) { - pr_err("Event page is already set\n"); - return -EINVAL; - } - kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset)); if (!kfd) { pr_err("Getting device by id failed in %s\n", __func__); @@ -1023,6 +1019,13 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, } mutex_lock(&p->mutex); + + if (p->signal_page) { + pr_err("Event page is already set\n"); + err = -EINVAL; + goto out_unlock; + } + pdd = kfd_bind_process_to_device(kfd, p); if (IS_ERR(pdd)) { err = PTR_ERR(pdd); @@ -1037,20 +1040,24 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, err = -EINVAL; goto out_unlock; } - mutex_unlock(&p->mutex); - err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd, + err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev, 
mem, &kern_addr, &size); if (err) { pr_err("Failed to map event page to kernel\n"); - return err; + goto out_unlock; } err = kfd_event_page_set(p, kern_addr, size); if (err) { pr_err("Failed to set event page\n"); - return err; + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem); + goto out_unlock; } + + p->signal_handle = args->event_page_offset; + + mutex_unlock(&p->mutex); } err = kfd_event_create(filp, p, args->event_type, @@ -1130,7 +1137,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) dev->kfd2kgd->set_scratch_backing_va( - dev->kgd, args->va_addr, pdd->qpd.vmid); + dev->adev, args->va_addr, pdd->qpd.vmid); return 0; @@ -1151,7 +1158,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep, if (!dev) return -EINVAL; - amdgpu_amdkfd_get_tile_config(dev->kgd, &config); + amdgpu_amdkfd_get_tile_config(dev->adev, &config); args->gb_addr_config = config.gb_addr_config; args->num_banks = config.num_banks; @@ -1237,7 +1244,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev) if (dev->use_iommu_v2) return false; - amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->adev, &mem_info); if (mem_info.local_mem_size_private == 0 && mem_info.local_mem_size_public > 0) return true; @@ -1259,6 +1266,23 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, if (args->size == 0) return -EINVAL; +#if IS_ENABLED(CONFIG_HSA_AMD_SVM) + /* Flush pending deferred work to avoid racing with deferred actions + * from previous memory map changes (e.g. munmap). + */ + svm_range_list_lock_and_flush_work(&p->svms, current->mm); + mutex_lock(&p->svms.lock); + mmap_write_unlock(current->mm); + if (interval_tree_iter_first(&p->svms.objects, + args->va_addr >> PAGE_SHIFT, + (args->va_addr + args->size - 1) >> PAGE_SHIFT)) { + pr_err("Address: 0x%llx already allocated by SVM\n", + args->va_addr); + mutex_unlock(&p->svms.lock); + return -EADDRINUSE; + } + mutex_unlock(&p->svms.lock); +#endif dev = kfd_device_by_id(args->gpu_id); if (!dev) return -EINVAL; @@ -1289,7 +1313,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, err = -EINVAL; goto err_unlock; } - offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd); + offset = dev->adev->rmmio_remap.bus_addr; if (!offset) { err = -ENOMEM; goto err_unlock; @@ -1297,7 +1321,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, } err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - dev->kgd, args->va_addr, args->size, + dev->adev, args->va_addr, args->size, pdd->drm_priv, (struct kgd_mem **) &mem, &offset, flags); @@ -1329,7 +1353,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, return 0; err_free: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); @@ -1351,6 +1375,15 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, return -EINVAL; mutex_lock(&p->mutex); + /* + * Safeguard to prevent user space from freeing signal BO. + * It will be freed at process termination. 
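+ * (kfd_ioctl_create_event saves the event page offset in
+ * p->signal_handle, which is what this check compares against the
+ * handle passed to the free ioctl.)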
+ */ + if (p->signal_handle && (p->signal_handle == args->handle)) { + pr_err("Free signal BO is not allowed\n"); + ret = -EPERM; + goto err_unlock; + } pdd = kfd_get_process_device_data(dev, p); if (!pdd) { @@ -1366,7 +1399,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, goto err_unlock; } - ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, + ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, &size); /* If freeing the buffer failed, leave the handle in place for @@ -1451,7 +1484,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - peer->kgd, (struct kgd_mem *)mem, + peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv, &table_freed); if (err) { pr_err("Failed to map to gpu %d/%d\n", @@ -1463,7 +1496,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, mutex_unlock(&p->mutex); - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; @@ -1560,7 +1593,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv); + peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv); if (err) { pr_err("Failed to unmap from gpu %d/%d\n", i, args->n_devices); @@ -1570,8 +1603,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } mutex_unlock(&p->mutex); - if (dev->device_info->asic_family == CHIP_ALDEBARAN) { - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) { + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); @@ -1647,7 +1680,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, { struct kfd_ioctl_get_dmabuf_info_args *args = data; struct kfd_dev *dev = NULL; - struct kgd_dev *dma_buf_kgd; + struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; unsigned int i; @@ -1667,15 +1700,15 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, } /* Get dmabuf info from KGD */ - r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd, - &dma_buf_kgd, &args->size, + r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd, + &dmabuf_adev, &args->size, metadata_buffer, args->metadata_size, &args->metadata_size, &flags); if (r) goto exit; /* Reverse-lookup gpu_id from kgd pointer */ - dev = kfd_device_by_kgd(dma_buf_kgd); + dev = kfd_device_by_adev(dmabuf_adev); if (!dev) { r = -EINVAL; goto exit; @@ -1725,7 +1758,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, goto err_unlock; } - r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf, + r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->adev, dmabuf, args->va_addr, pdd->drm_priv, (struct kgd_mem **)&mem, &size, NULL); @@ -1746,7 +1779,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, return 0; err_free: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); @@ -2033,7 +2066,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, if 
(vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; - address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd); + address = dev->adev->rmmio_remap.bus_addr; vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index cfedfb1e8596..f187596faf66 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1340,7 +1340,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, int ret; unsigned int num_cu_shared; - switch (kdev->device_info->asic_family) { + switch (kdev->adev->asic_type) { case CHIP_KAVERI: pcache_info = kaveri_cache_info; num_of_cache_types = ARRAY_SIZE(kaveri_cache_info); @@ -1377,67 +1377,71 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = vegam_cache_info; num_of_cache_types = ARRAY_SIZE(vegam_cache_info); break; - case CHIP_VEGA10: - pcache_info = vega10_cache_info; - num_of_cache_types = ARRAY_SIZE(vega10_cache_info); - break; - case CHIP_VEGA12: - pcache_info = vega12_cache_info; - num_of_cache_types = ARRAY_SIZE(vega12_cache_info); - break; - case CHIP_VEGA20: - case CHIP_ARCTURUS: - pcache_info = vega20_cache_info; - num_of_cache_types = ARRAY_SIZE(vega20_cache_info); - break; - case CHIP_ALDEBARAN: - pcache_info = aldebaran_cache_info; - num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); - break; - case CHIP_RAVEN: - pcache_info = raven_cache_info; - num_of_cache_types = ARRAY_SIZE(raven_cache_info); - break; - case CHIP_RENOIR: - pcache_info = renoir_cache_info; - num_of_cache_types = ARRAY_SIZE(renoir_cache_info); - break; - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_CYAN_SKILLFISH: - pcache_info = navi10_cache_info; - num_of_cache_types = ARRAY_SIZE(navi10_cache_info); - break; - case CHIP_NAVI14: - pcache_info = navi14_cache_info; - num_of_cache_types = ARRAY_SIZE(navi14_cache_info); - break; - case CHIP_SIENNA_CICHLID: - pcache_info = sienna_cichlid_cache_info; - num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info); - break; - case CHIP_NAVY_FLOUNDER: - pcache_info = navy_flounder_cache_info; - num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info); - break; - case CHIP_DIMGREY_CAVEFISH: - pcache_info = dimgrey_cavefish_cache_info; - num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info); - break; - case CHIP_VANGOGH: - pcache_info = vangogh_cache_info; - num_of_cache_types = ARRAY_SIZE(vangogh_cache_info); - break; - case CHIP_BEIGE_GOBY: - pcache_info = beige_goby_cache_info; - num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); - break; - case CHIP_YELLOW_CARP: - pcache_info = yellow_carp_cache_info; - num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); - break; default: - return -EINVAL; + switch(KFD_GC_VERSION(kdev)) { + case IP_VERSION(9, 0, 1): + pcache_info = vega10_cache_info; + num_of_cache_types = ARRAY_SIZE(vega10_cache_info); + break; + case IP_VERSION(9, 2, 1): + pcache_info = vega12_cache_info; + num_of_cache_types = ARRAY_SIZE(vega12_cache_info); + break; + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + pcache_info = vega20_cache_info; + num_of_cache_types = ARRAY_SIZE(vega20_cache_info); + break; + case IP_VERSION(9, 4, 2): + pcache_info = aldebaran_cache_info; + num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): + pcache_info = raven_cache_info; + num_of_cache_types = ARRAY_SIZE(raven_cache_info); + break; + case IP_VERSION(9, 
3, 0): + pcache_info = renoir_cache_info; + num_of_cache_types = ARRAY_SIZE(renoir_cache_info); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + pcache_info = navi10_cache_info; + num_of_cache_types = ARRAY_SIZE(navi10_cache_info); + break; + case IP_VERSION(10, 1, 1): + pcache_info = navi14_cache_info; + num_of_cache_types = ARRAY_SIZE(navi14_cache_info); + break; + case IP_VERSION(10, 3, 0): + pcache_info = sienna_cichlid_cache_info; + num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info); + break; + case IP_VERSION(10, 3, 2): + pcache_info = navy_flounder_cache_info; + num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info); + break; + case IP_VERSION(10, 3, 4): + pcache_info = dimgrey_cavefish_cache_info; + num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info); + break; + case IP_VERSION(10, 3, 1): + pcache_info = vangogh_cache_info; + num_of_cache_types = ARRAY_SIZE(vangogh_cache_info); + break; + case IP_VERSION(10, 3, 5): + pcache_info = beige_goby_cache_info; + num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); + break; + case IP_VERSION(10, 3, 3): + pcache_info = yellow_carp_cache_info; + num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); + break; + default: + return -EINVAL; + } } *size_filled = 0; @@ -1963,8 +1967,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain) { - struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd; - *avail_size -= sizeof(struct crat_subtype_iolink); if (*avail_size < 0) return -ENOMEM; @@ -1981,7 +1983,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, /* Fill in IOLINK subtype. * TODO: Fill-in other fields of iolink subtype */ - if (adev->gmc.xgmi.connected_to_cpu) { + if (kdev->adev->gmc.xgmi.connected_to_cpu) { /* * with host gpu xgmi link, host can access gpu memory whether * or not pcie bar type is large, so always create bidirectional @@ -1990,19 +1992,19 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL; sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; sub_type_hdr->num_hops_xgmi = 1; - if (adev->asic_type == CHIP_ALDEBARAN) { + if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) { sub_type_hdr->minimum_bandwidth_mbs = amdgpu_amdkfd_get_xgmi_bandwidth_mbytes( - kdev->kgd, NULL, true); + kdev->adev, NULL, true); sub_type_hdr->maximum_bandwidth_mbs = sub_type_hdr->minimum_bandwidth_mbs; } } else { sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS; sub_type_hdr->minimum_bandwidth_mbs = - amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true); + amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true); sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false); + amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false); } sub_type_hdr->proximity_domain_from = proximity_domain; @@ -2044,11 +2046,11 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, sub_type_hdr->proximity_domain_from = proximity_domain_from; sub_type_hdr->proximity_domain_to = proximity_domain_to; sub_type_hdr->num_hops_xgmi = - amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd); + amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev); sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, peer_kdev->kgd, false); + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false); 
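/* A sketch, not from this patch: the IP_VERSION() checks used throughout
 * these hunks rely on the version triple being packed into one
 * monotonically comparable integer, presumably along the lines of the
 * amdgpu definitions:
 *
 *   #define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))
 *   #define KFD_GC_VERSION(dev)  ((dev)->adev->ip_versions[GC_HWIP][0])
 *   #define KFD_IS_SOC15(dev)    (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
 *
 * That packing is why range comparisons such as
 * KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1) can replace the old CHIP_*
 * family checks.
 */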
sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ? - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, NULL, true) : 0; + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0; return 0; } @@ -2114,7 +2116,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT; cu->proximity_domain = proximity_domain; - amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info); cu->num_simd_per_cu = cu_info.simd_per_cu; cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number; cu->max_waves_simd = cu_info.max_waves_per_simd; @@ -2145,7 +2147,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. */ - amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info); sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index 159add0f5aaa..1e30717b5253 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -41,7 +41,7 @@ static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev) { - dev->kfd2kgd->address_watch_disable(dev->kgd); + dev->kfd2kgd->address_watch_disable(dev->adev); } static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, @@ -322,7 +322,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev, pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *"); pdd->dev->kfd2kgd->address_watch_execute( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, cntl.u32All, addrHi.u32All, @@ -420,7 +420,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_CNTL); @@ -431,7 +431,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_ADDR_HI); @@ -441,7 +441,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_ADDR_LO); @@ -457,7 +457,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_CNTL); @@ -752,7 +752,7 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev, pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); - return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->kgd, + return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->adev, reg_gfx_index.u32All, reg_sq_cmd.u32All); } @@ -784,7 +784,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info - (dev->kgd, vmid, &queried_pasid); + (dev->adev, vmid, &queried_pasid); if (status && queried_pasid == p->pasid) { pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n", @@ -811,7 +811,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) /* for non DIQ we need to patch the VMID: */ reg_sq_cmd.bits.vm_id = vmid; - 
dev->kfd2kgd->wave_control_execute(dev->kgd, + dev->kfd2kgd->wave_control_execute(dev->adev, reg_gfx_index.u32All, reg_sq_cmd.u32All); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 16a57b70cc1a..e1294fba0c26 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -32,6 +32,7 @@ #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" #include "kfd_migrate.h" +#include "amdgpu.h" #define MQD_SIZE_ALIGNED 768 @@ -52,44 +53,8 @@ extern const struct kfd2kgd_calls aldebaran_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; -static const struct kfd2kgd_calls *kfd2kgd_funcs[] = { -#ifdef KFD_SUPPORT_IOMMU_V2 -#ifdef CONFIG_DRM_AMDGPU_CIK - [CHIP_KAVERI] = &gfx_v7_kfd2kgd, -#endif - [CHIP_CARRIZO] = &gfx_v8_kfd2kgd, - [CHIP_RAVEN] = &gfx_v9_kfd2kgd, -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK - [CHIP_HAWAII] = &gfx_v7_kfd2kgd, -#endif - [CHIP_TONGA] = &gfx_v8_kfd2kgd, - [CHIP_FIJI] = &gfx_v8_kfd2kgd, - [CHIP_POLARIS10] = &gfx_v8_kfd2kgd, - [CHIP_POLARIS11] = &gfx_v8_kfd2kgd, - [CHIP_POLARIS12] = &gfx_v8_kfd2kgd, - [CHIP_VEGAM] = &gfx_v8_kfd2kgd, - [CHIP_VEGA10] = &gfx_v9_kfd2kgd, - [CHIP_VEGA12] = &gfx_v9_kfd2kgd, - [CHIP_VEGA20] = &gfx_v9_kfd2kgd, - [CHIP_RENOIR] = &gfx_v9_kfd2kgd, - [CHIP_ARCTURUS] = &arcturus_kfd2kgd, - [CHIP_ALDEBARAN] = &aldebaran_kfd2kgd, - [CHIP_NAVI10] = &gfx_v10_kfd2kgd, - [CHIP_NAVI12] = &gfx_v10_kfd2kgd, - [CHIP_NAVI14] = &gfx_v10_kfd2kgd, - [CHIP_SIENNA_CICHLID] = &gfx_v10_3_kfd2kgd, - [CHIP_NAVY_FLOUNDER] = &gfx_v10_3_kfd2kgd, - [CHIP_VANGOGH] = &gfx_v10_3_kfd2kgd, - [CHIP_DIMGREY_CAVEFISH] = &gfx_v10_3_kfd2kgd, - [CHIP_BEIGE_GOBY] = &gfx_v10_3_kfd2kgd, - [CHIP_YELLOW_CARP] = &gfx_v10_3_kfd2kgd, - [CHIP_CYAN_SKILLFISH] = &gfx_v10_kfd2kgd, -}; - #ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { - .asic_family = CHIP_KAVERI, .asic_name = "kaveri", .gfx_target_version = 70000, .max_pasid_bits = 16, @@ -103,13 +68,10 @@ static const struct kfd_device_info kaveri_device_info = { .supports_cwsr = false, .needs_iommu_device = true, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info carrizo_device_info = { - .asic_family = CHIP_CARRIZO, .asic_name = "carrizo", .gfx_target_version = 80001, .max_pasid_bits = 16, @@ -123,14 +85,10 @@ static const struct kfd_device_info carrizo_device_info = { .supports_cwsr = true, .needs_iommu_device = true, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; -#endif static const struct kfd_device_info raven_device_info = { - .asic_family = CHIP_RAVEN, .asic_name = "raven", .gfx_target_version = 90002, .max_pasid_bits = 16, @@ -143,13 +101,12 @@ static const struct kfd_device_info raven_device_info = { .supports_cwsr = true, .needs_iommu_device = true, .needs_pci_atomics = true, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK static const struct kfd_device_info hawaii_device_info = { - .asic_family = CHIP_HAWAII, .asic_name = "hawaii", .gfx_target_version = 70001, .max_pasid_bits = 16, @@ -163,13 +120,11 @@ static const struct kfd_device_info hawaii_device_info = { .supports_cwsr = false, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, 
.num_sdma_queues_per_engine = 2, }; +#endif static const struct kfd_device_info tonga_device_info = { - .asic_family = CHIP_TONGA, .asic_name = "tonga", .gfx_target_version = 80002, .max_pasid_bits = 16, @@ -182,13 +137,10 @@ static const struct kfd_device_info tonga_device_info = { .supports_cwsr = false, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info fiji_device_info = { - .asic_family = CHIP_FIJI, .asic_name = "fiji", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -201,13 +153,10 @@ static const struct kfd_device_info fiji_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info fiji_vf_device_info = { - .asic_family = CHIP_FIJI, .asic_name = "fiji", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -220,14 +169,11 @@ static const struct kfd_device_info fiji_vf_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris10_device_info = { - .asic_family = CHIP_POLARIS10, .asic_name = "polaris10", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -240,13 +186,10 @@ static const struct kfd_device_info polaris10_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris10_vf_device_info = { - .asic_family = CHIP_POLARIS10, .asic_name = "polaris10", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -259,13 +202,10 @@ static const struct kfd_device_info polaris10_vf_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris11_device_info = { - .asic_family = CHIP_POLARIS11, .asic_name = "polaris11", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -278,13 +218,10 @@ static const struct kfd_device_info polaris11_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris12_device_info = { - .asic_family = CHIP_POLARIS12, .asic_name = "polaris12", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -297,13 +234,10 @@ static const struct kfd_device_info polaris12_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vegam_device_info = { - .asic_family = CHIP_VEGAM, .asic_name = "vegam", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -316,13 +250,10 @@ static const struct kfd_device_info vegam_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega10_device_info = { - .asic_family = CHIP_VEGA10, .asic_name = "vega10", .gfx_target_version = 90000, .max_pasid_bits = 
16, @@ -335,13 +266,10 @@ static const struct kfd_device_info vega10_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega10_vf_device_info = { - .asic_family = CHIP_VEGA10, .asic_name = "vega10", .gfx_target_version = 90000, .max_pasid_bits = 16, @@ -354,13 +282,10 @@ static const struct kfd_device_info vega10_vf_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega12_device_info = { - .asic_family = CHIP_VEGA12, .asic_name = "vega12", .gfx_target_version = 90004, .max_pasid_bits = 16, @@ -373,13 +298,10 @@ static const struct kfd_device_info vega12_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega20_device_info = { - .asic_family = CHIP_VEGA20, .asic_name = "vega20", .gfx_target_version = 90006, .max_pasid_bits = 16, @@ -392,13 +314,10 @@ static const struct kfd_device_info vega20_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info arcturus_device_info = { - .asic_family = CHIP_ARCTURUS, .asic_name = "arcturus", .gfx_target_version = 90008, .max_pasid_bits = 16, @@ -411,13 +330,10 @@ static const struct kfd_device_info arcturus_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 6, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info aldebaran_device_info = { - .asic_family = CHIP_ALDEBARAN, .asic_name = "aldebaran", .gfx_target_version = 90010, .max_pasid_bits = 16, @@ -430,15 +346,12 @@ static const struct kfd_device_info aldebaran_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 3, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info renoir_device_info = { - .asic_family = CHIP_RENOIR, .asic_name = "renoir", - .gfx_target_version = 90002, + .gfx_target_version = 90012, .max_pasid_bits = 16, .max_no_of_hqd = 24, .doorbell_size = 8, @@ -449,13 +362,10 @@ static const struct kfd_device_info renoir_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info navi10_device_info = { - .asic_family = CHIP_NAVI10, .asic_name = "navi10", .gfx_target_version = 100100, .max_pasid_bits = 16, @@ -468,13 +378,11 @@ static const struct kfd_device_info navi10_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, + .no_atomic_fw_version = 145, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info navi12_device_info = { - .asic_family = CHIP_NAVI12, .asic_name = "navi12", .gfx_target_version = 100101, .max_pasid_bits = 16, @@ -487,13 +395,11 @@ static const struct kfd_device_info navi12_device_info = { 
.needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, + .no_atomic_fw_version = 145, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info navi14_device_info = { - .asic_family = CHIP_NAVI14, .asic_name = "navi14", .gfx_target_version = 100102, .max_pasid_bits = 16, @@ -506,13 +412,11 @@ static const struct kfd_device_info navi14_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, + .no_atomic_fw_version = 145, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info sienna_cichlid_device_info = { - .asic_family = CHIP_SIENNA_CICHLID, .asic_name = "sienna_cichlid", .gfx_target_version = 100300, .max_pasid_bits = 16, @@ -525,13 +429,11 @@ static const struct kfd_device_info sienna_cichlid_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 4, - .num_xgmi_sdma_engines = 0, + .no_atomic_fw_version = 92, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info navy_flounder_device_info = { - .asic_family = CHIP_NAVY_FLOUNDER, .asic_name = "navy_flounder", .gfx_target_version = 100301, .max_pasid_bits = 16, @@ -544,13 +446,11 @@ static const struct kfd_device_info navy_flounder_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, + .no_atomic_fw_version = 92, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info vangogh_device_info = { - .asic_family = CHIP_VANGOGH, .asic_name = "vangogh", .gfx_target_version = 100303, .max_pasid_bits = 16, @@ -562,14 +462,12 @@ static const struct kfd_device_info vangogh_device_info = { .mqd_size_aligned = MQD_SIZE_ALIGNED, .needs_iommu_device = false, .supports_cwsr = true, - .needs_pci_atomics = false, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, + .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info dimgrey_cavefish_device_info = { - .asic_family = CHIP_DIMGREY_CAVEFISH, .asic_name = "dimgrey_cavefish", .gfx_target_version = 100302, .max_pasid_bits = 16, @@ -582,13 +480,11 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, + .no_atomic_fw_version = 92, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info beige_goby_device_info = { - .asic_family = CHIP_BEIGE_GOBY, .asic_name = "beige_goby", .gfx_target_version = 100304, .max_pasid_bits = 16, @@ -601,13 +497,11 @@ static const struct kfd_device_info beige_goby_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, + .no_atomic_fw_version = 92, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info yellow_carp_device_info = { - .asic_family = CHIP_YELLOW_CARP, .asic_name = "yellow_carp", .gfx_target_version = 100305, .max_pasid_bits = 16, @@ -619,14 +513,12 @@ static const struct kfd_device_info yellow_carp_device_info = { .mqd_size_aligned = MQD_SIZE_ALIGNED, .needs_iommu_device = false, .supports_cwsr = true, - .needs_pci_atomics = false, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, + .needs_pci_atomics = true, + .no_atomic_fw_version 
= 92, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info cyan_skillfish_device_info = { - .asic_family = CHIP_CYAN_SKILLFISH, .asic_name = "cyan_skillfish", .gfx_target_version = 100103, .max_pasid_bits = 16, @@ -639,68 +531,208 @@ static const struct kfd_device_info cyan_skillfish_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; -/* For each entry, [0] is regular and [1] is virtualisation device. */ -static const struct kfd_device_info *kfd_supported_devices[][2] = { -#ifdef KFD_SUPPORT_IOMMU_V2 - [CHIP_KAVERI] = {&kaveri_device_info, NULL}, - [CHIP_CARRIZO] = {&carrizo_device_info, NULL}, -#endif - [CHIP_RAVEN] = {&raven_device_info, NULL}, - [CHIP_HAWAII] = {&hawaii_device_info, NULL}, - [CHIP_TONGA] = {&tonga_device_info, NULL}, - [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info}, - [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info}, - [CHIP_POLARIS11] = {&polaris11_device_info, NULL}, - [CHIP_POLARIS12] = {&polaris12_device_info, NULL}, - [CHIP_VEGAM] = {&vegam_device_info, NULL}, - [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info}, - [CHIP_VEGA12] = {&vega12_device_info, NULL}, - [CHIP_VEGA20] = {&vega20_device_info, NULL}, - [CHIP_RENOIR] = {&renoir_device_info, NULL}, - [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info}, - [CHIP_ALDEBARAN] = {&aldebaran_device_info, &aldebaran_device_info}, - [CHIP_NAVI10] = {&navi10_device_info, NULL}, - [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info}, - [CHIP_NAVI14] = {&navi14_device_info, NULL}, - [CHIP_SIENNA_CICHLID] = {&sienna_cichlid_device_info, &sienna_cichlid_device_info}, - [CHIP_NAVY_FLOUNDER] = {&navy_flounder_device_info, &navy_flounder_device_info}, - [CHIP_VANGOGH] = {&vangogh_device_info, NULL}, - [CHIP_DIMGREY_CAVEFISH] = {&dimgrey_cavefish_device_info, &dimgrey_cavefish_device_info}, - [CHIP_BEIGE_GOBY] = {&beige_goby_device_info, &beige_goby_device_info}, - [CHIP_YELLOW_CARP] = {&yellow_carp_device_info, NULL}, - [CHIP_CYAN_SKILLFISH] = {&cyan_skillfish_device_info, NULL}, -}; - static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int chunk_size); static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume(struct kfd_dev *kfd); -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, - struct pci_dev *pdev, unsigned int asic_type, bool vf) +struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) { struct kfd_dev *kfd; const struct kfd_device_info *device_info; const struct kfd2kgd_calls *f2g; + struct pci_dev *pdev = adev->pdev; - if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2) - || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) { - dev_err(kfd_device, "asic_type %d out of range\n", asic_type); - return NULL; /* asic_type out of range */ + switch (adev->asic_type) { +#ifdef KFD_SUPPORT_IOMMU_V2 +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_KAVERI: + if (vf) + device_info = NULL; + else + device_info = &kaveri_device_info; + f2g = &gfx_v7_kfd2kgd; + break; +#endif + case CHIP_CARRIZO: + if (vf) + device_info = NULL; + else + device_info = &carrizo_device_info; + f2g = &gfx_v8_kfd2kgd; + break; +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_HAWAII: + if (vf) + device_info = NULL; + else + device_info = &hawaii_device_info; + f2g = &gfx_v7_kfd2kgd; + break; +#endif + case CHIP_TONGA: + if (vf) + device_info = NULL; + else + device_info = 
&tonga_device_info; + f2g = &gfx_v8_kfd2kgd; + break; + case CHIP_FIJI: + if (vf) + device_info = &fiji_vf_device_info; + else + device_info = &fiji_device_info; + f2g = &gfx_v8_kfd2kgd; + break; + case CHIP_POLARIS10: + if (vf) + device_info = &polaris10_vf_device_info; + else + device_info = &polaris10_device_info; + f2g = &gfx_v8_kfd2kgd; + break; + case CHIP_POLARIS11: + if (vf) + device_info = NULL; + else + device_info = &polaris11_device_info; + f2g = &gfx_v8_kfd2kgd; + break; + case CHIP_POLARIS12: + if (vf) + device_info = NULL; + else + device_info = &polaris12_device_info; + f2g = &gfx_v8_kfd2kgd; + break; + case CHIP_VEGAM: + if (vf) + device_info = NULL; + else + device_info = &vegam_device_info; + f2g = &gfx_v8_kfd2kgd; + break; + default: + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 0, 1): + if (vf) + device_info = &vega10_vf_device_info; + else + device_info = &vega10_device_info; + f2g = &gfx_v9_kfd2kgd; + break; +#ifdef KFD_SUPPORT_IOMMU_V2 + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): + if (vf) + device_info = NULL; + else + device_info = &raven_device_info; + f2g = &gfx_v9_kfd2kgd; + break; +#endif + case IP_VERSION(9, 2, 1): + if (vf) + device_info = NULL; + else + device_info = &vega12_device_info; + f2g = &gfx_v9_kfd2kgd; + break; + case IP_VERSION(9, 3, 0): + if (vf) + device_info = NULL; + else + device_info = &renoir_device_info; + f2g = &gfx_v9_kfd2kgd; + break; + case IP_VERSION(9, 4, 0): + if (vf) + device_info = NULL; + else + device_info = &vega20_device_info; + f2g = &gfx_v9_kfd2kgd; + break; + case IP_VERSION(9, 4, 1): + device_info = &arcturus_device_info; + f2g = &arcturus_kfd2kgd; + break; + case IP_VERSION(9, 4, 2): + device_info = &aldebaran_device_info; + f2g = &aldebaran_kfd2kgd; + break; + case IP_VERSION(10, 1, 10): + if (vf) + device_info = NULL; + else + device_info = &navi10_device_info; + f2g = &gfx_v10_kfd2kgd; + break; + case IP_VERSION(10, 1, 2): + device_info = &navi12_device_info; + f2g = &gfx_v10_kfd2kgd; + break; + case IP_VERSION(10, 1, 1): + if (vf) + device_info = NULL; + else + device_info = &navi14_device_info; + f2g = &gfx_v10_kfd2kgd; + break; + case IP_VERSION(10, 1, 3): + if (vf) + device_info = NULL; + else + device_info = &cyan_skillfish_device_info; + f2g = &gfx_v10_kfd2kgd; + break; + case IP_VERSION(10, 3, 0): + device_info = &sienna_cichlid_device_info; + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 2): + device_info = &navy_flounder_device_info; + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 1): + if (vf) + device_info = NULL; + else + device_info = &vangogh_device_info; + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 4): + device_info = &dimgrey_cavefish_device_info; + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 5): + device_info = &beige_goby_device_info; + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 3): + if (vf) + device_info = NULL; + else + device_info = &yellow_carp_device_info; + f2g = &gfx_v10_3_kfd2kgd; + break; + default: + return NULL; + } + break; } - device_info = kfd_supported_devices[asic_type][vf]; - f2g = kfd2kgd_funcs[asic_type]; - if (!device_info || !f2g) { - dev_err(kfd_device, "%s %s not supported in kfd\n", - amdgpu_asic_name[asic_type], vf ? "VF" : ""); + if (adev->ip_versions[GC_HWIP][0]) + dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", + adev->ip_versions[GC_HWIP][0], vf ? 
"VF" : ""); + else + dev_err(kfd_device, "%s %s not supported in kfd\n", + amdgpu_asic_name[adev->asic_type], vf ? "VF" : ""); return NULL; } @@ -708,21 +740,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, if (!kfd) return NULL; - /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. - * 32 and 64-bit requests are possible and must be - * supported. - */ - kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd); - if (device_info->needs_pci_atomics && - !kfd->pci_atomic_requested) { - dev_info(kfd_device, - "skipped device %x:%x, PCI rejects atomics\n", - pdev->vendor, pdev->device); - kfree(kfd); - return NULL; - } - - kfd->kgd = kgd; + kfd->adev = adev; kfd->device_info = device_info; kfd->pdev = pdev; kfd->init_complete = false; @@ -743,23 +761,23 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, static void kfd_cwsr_init(struct kfd_dev *kfd) { if (cwsr_enable && kfd->device_info->supports_cwsr) { - if (kfd->device_info->asic_family < CHIP_VEGA10) { + if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx8_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); - } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) { + } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_arcturus_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); - } else if (kfd->device_info->asic_family == CHIP_ALDEBARAN) { + } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) { BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_aldebaran_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex); - } else if (kfd->device_info->asic_family < CHIP_NAVI10) { + } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx9_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); - } else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) { + } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) { BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_nv1x_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex); @@ -780,18 +798,17 @@ static int kfd_gws_init(struct kfd_dev *kfd) if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; - if (hws_gws_support - || (kfd->device_info->asic_family == CHIP_VEGA10 - && kfd->mec2_fw_version >= 0x81b3) - || (kfd->device_info->asic_family >= CHIP_VEGA12 - && kfd->device_info->asic_family <= CHIP_RAVEN - && kfd->mec2_fw_version >= 0x1b3) - || (kfd->device_info->asic_family == CHIP_ARCTURUS - && kfd->mec2_fw_version >= 0x30) - || (kfd->device_info->asic_family == CHIP_ALDEBARAN - && kfd->mec2_fw_version >= 0x28)) - ret = amdgpu_amdkfd_alloc_gws(kfd->kgd, - amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws); + if (hws_gws_support || (KFD_IS_SOC15(kfd) && + ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) + && kfd->mec2_fw_version >= 0x81b3) || + (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) + && kfd->mec2_fw_version >= 0x1b3) || + (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) + && kfd->mec2_fw_version >= 0x30) || + (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) + && kfd->mec2_fw_version >= 0x28)))) + ret = amdgpu_amdkfd_alloc_gws(kfd->adev, + kfd->adev->gds.gws_size, &kfd->gws); return ret; } @@ -808,11 +825,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, unsigned int size, map_process_packet_size; kfd->ddev = ddev; - kfd->mec_fw_version = 
amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); - kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC2); - kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; @@ -821,6 +838,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd - kfd->vm_info.first_vmid_kfd + 1; + /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. + * 32 and 64-bit requests are possible and must be + * supported. + */ + kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); + if (!kfd->pci_atomic_requested && + kfd->device_info->needs_pci_atomics && + (!kfd->device_info->no_atomic_fw_version || + kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) { + dev_info(kfd_device, + "skipped device %x:%x, PCI rejects atomics %d<%d\n", + kfd->pdev->vendor, kfd->pdev->device, + kfd->mec_fw_version, + kfd->device_info->no_atomic_fw_version); + return false; + } + /* Verify module parameters regarding mapped process number*/ if ((hws_max_conc_proc < 0) || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { @@ -840,10 +874,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * calculate max size of runlist packet. * There can be only 2 packets at once */ - map_process_packet_size = - kfd->device_info->asic_family == CHIP_ALDEBARAN ? + map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ? sizeof(struct pm4_mes_map_process_aldebaran) : - sizeof(struct pm4_mes_map_process); + sizeof(struct pm4_mes_map_process); size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size + max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues) + sizeof(struct pm4_mes_runlist)) * 2; @@ -855,7 +888,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, size += 512 * 1024; if (amdgpu_amdkfd_alloc_gtt_mem( - kfd->kgd, size, &kfd->gtt_mem, + kfd->adev, size, &kfd->gtt_mem, &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, false)) { dev_err(kfd_device, "Could not allocate %d bytes\n", size); @@ -876,9 +909,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_doorbell_error; } - kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd); + kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; - kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd); + kfd->noretry = kfd->adev->gmc.noretry; if (kfd_interrupt_init(kfd)) { dev_err(kfd_device, "Error initializing interrupts\n"); @@ -896,7 +929,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, */ if (kfd_gws_init(kfd)) { dev_err(kfd_device, "Could not allocate %d gws\n", - amdgpu_amdkfd_get_num_gws(kfd->kgd)); + kfd->adev->gds.gws_size); goto gws_error; } @@ -904,13 +937,17 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_double_confirm_iommu_support(kfd); if (kfd_iommu_device_init(kfd)) { + kfd->use_iommu_v2 = false; dev_err(kfd_device, "Error initializing iommuv2\n"); goto device_iommu_error; } kfd_cwsr_init(kfd); - svm_migrate_init((struct amdgpu_device *)kfd->kgd); + svm_migrate_init(kfd->adev); + + if(kgd2kfd_resume_iommu(kfd)) + goto device_iommu_error; if (kfd_resume(kfd)) goto kfd_resume_error; @@ -945,10 +982,10 @@ kfd_interrupt_error: kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); 
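/* The KFD_GC_VERSION() range checks above rely on IP_VERSION() packing
 * major/minor/revision into one monotonically ordered integer. A minimal
 * sketch of the encoding, assuming the amdgpu definition of this era
 * (illustrative only, not part of this hunk):
 *
 *   #define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))
 *
 * e.g. IP_VERSION(9, 0, 1) == 0x090001 (Vega10, GFX9) sorts below
 * IP_VERSION(10, 1, 1) == 0x0a0101 (Navi14, GFX10), so a test like
 * "KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)" matches every GFX9 part,
 * and the dev_err() above can print the raw value with "%06x".
 */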
alloc_gtt_mem_failure: if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws); + amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); dev_err(kfd_device, "device %x:%x NOT added due to errors\n", kfd->pdev->vendor, kfd->pdev->device); @@ -959,16 +996,15 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - svm_migrate_fini((struct amdgpu_device *)kfd->kgd); device_queue_manager_uninit(kfd->dqm); kfd_interrupt_exit(kfd); kfd_topology_remove_device(kfd); kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws); + amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); } kfree(kfd); @@ -1057,31 +1093,29 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) return ret; } -static int kfd_resume(struct kfd_dev *kfd) +int kgd2kfd_resume_iommu(struct kfd_dev *kfd) { int err = 0; err = kfd_iommu_resume(kfd); - if (err) { + if (err) dev_err(kfd_device, "Failed to resume IOMMU for device %x:%x\n", kfd->pdev->vendor, kfd->pdev->device); - return err; - } + return err; +} + +static int kfd_resume(struct kfd_dev *kfd) +{ + int err = 0; err = kfd->dqm->ops.start(kfd->dqm); - if (err) { + if (err) dev_err(kfd_device, "Error starting queue manager for device %x:%x\n", kfd->pdev->vendor, kfd->pdev->device); - goto dqm_start_error; - } return err; - -dqm_start_error: - kfd_iommu_suspend(kfd); - return err; } static inline void kfd_queue_work(struct workqueue_struct *wq, @@ -1406,7 +1440,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) void kfd_inc_compute_active(struct kfd_dev *kfd) { if (atomic_inc_return(&kfd->compute_profile) == 1) - amdgpu_amdkfd_set_compute_idle(kfd->kgd, false); + amdgpu_amdkfd_set_compute_idle(kfd->adev, false); } void kfd_dec_compute_active(struct kfd_dev *kfd) @@ -1414,7 +1448,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd) int count = atomic_dec_return(&kfd->compute_profile); if (count == 0) - amdgpu_amdkfd_set_compute_idle(kfd->kgd, true); + amdgpu_amdkfd_set_compute_idle(kfd->adev, true); WARN_ONCE(count < 0, "Compute profile ref. count error"); } @@ -1424,6 +1458,26 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); } +/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and + * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA. + * When the device has more than two engines, we reserve two for PCIe to enable + * full-duplex and the rest are used as XGMI. 
+ */ +unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) +{ + /* If XGMI is not supported, all SDMA engines are PCIe */ + if (!kdev->adev->gmc.xgmi.supported) + return kdev->adev->sdma.num_instances; + + return min(kdev->adev->sdma.num_instances, 2); +} + +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) +{ + /* After reserved for PCIe, the rest of engines are XGMI */ + return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f8fce9d05f50..2af2b3268171 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -99,38 +99,29 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) return dqm->dev->shared_resources.num_pipe_per_mec; } -static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm) -{ - return dqm->dev->device_info->num_sdma_engines; -} - -static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm) -{ - return dqm->dev->device_info->num_xgmi_sdma_engines; -} - static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) { - return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm); + return kfd_get_num_sdma_engines(dqm->dev) + + kfd_get_num_xgmi_sdma_engines(dqm->dev); } unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { - return dqm->dev->device_info->num_sdma_engines - * dqm->dev->device_info->num_sdma_queues_per_engine; + return kfd_get_num_sdma_engines(dqm->dev) * + dqm->dev->device_info->num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { - return dqm->dev->device_info->num_xgmi_sdma_engines - * dqm->dev->device_info->num_sdma_queues_per_engine; + return kfd_get_num_xgmi_sdma_engines(dqm->dev) * + dqm->dev->device_info->num_sdma_queues_per_engine; } void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { return dqm->dev->kfd2kgd->program_sh_mem_settings( - dqm->dev->kgd, qpd->vmid, + dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, @@ -157,7 +148,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { struct kfd_dev *dev = qpd->dqm->dev; - if (!KFD_IS_SOC15(dev->device_info->asic_family)) { + if (!KFD_IS_SOC15(dev)) { /* On pre-SOC15 chips we need to use the queue ID to * preserve the user mode ABI. 
*/ @@ -202,7 +193,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, unsigned int old; struct kfd_dev *dev = qpd->dqm->dev; - if (!KFD_IS_SOC15(dev->device_info->asic_family) || + if (!KFD_IS_SOC15(dev) || q->properties.type == KFD_QUEUE_TYPE_SDMA || q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) return; @@ -216,7 +207,7 @@ static void program_trap_handler_settings(struct device_queue_manager *dqm, { if (dqm->dev->kfd2kgd->program_trap_handler_settings) dqm->dev->kfd2kgd->program_trap_handler_settings( - dqm->dev->kgd, qpd->vmid, + dqm->dev->adev, qpd->vmid, qpd->tba_addr, qpd->tma_addr); } @@ -250,21 +241,20 @@ static int allocate_vmid(struct device_queue_manager *dqm, program_sh_mem_settings(dqm, qpd); - if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 && - dqm->dev->cwsr_enabled) + if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled) program_trap_handler_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() * is called, i.e. when the first queue is created. */ - dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd, + dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev, qpd->vmid, qpd->page_table_base); /* invalidate the VM context after pasid and vmid mapping is set up */ kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY); if (dqm->dev->kfd2kgd->set_scratch_backing_va) - dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd, + dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev, qpd->sh_hidden_private_base, qpd->vmid); return 0; @@ -283,7 +273,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, if (ret) return ret; - return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid, + return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid, qpd->ib_base, (uint32_t *)qpd->ib_kaddr, pmf->release_mem_size / sizeof(uint32_t)); } @@ -293,7 +283,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm, struct queue *q) { /* On GFX v7, CP doesn't flush TC at dequeue */ - if (q->device->device_info->asic_family == CHIP_HAWAII) + if (q->device->adev->asic_type == CHIP_HAWAII) if (flush_texture_cache_nocpsch(q->device, qpd)) pr_err("Failed to flush TC\n"); @@ -557,7 +547,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, return retval; } -static int update_queue(struct device_queue_manager *dqm, struct queue *q) +static int update_queue(struct device_queue_manager *dqm, struct queue *q, + struct mqd_update_info *minfo) { int retval = 0; struct mqd_manager *mqd_mgr; @@ -605,7 +596,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) } } - mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties); + mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo); /* * check active state vs. 
the previous state and modify @@ -775,7 +766,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, if (!list_empty(&qpd->queues_list)) { dqm->dev->kfd2kgd->set_vm_context_page_table_base( - dqm->dev->kgd, + dqm->dev->adev, qpd->vmid, qpd->page_table_base); kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY); @@ -953,7 +944,7 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->kgd, pasid, vmid); + dqm->dev->adev, pasid, vmid); } static void init_interrupts(struct device_queue_manager *dqm) @@ -962,7 +953,7 @@ static void init_interrupts(struct device_queue_manager *dqm) for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) if (is_pipe_enabled(dqm, 0, i)) - dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i); + dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i); } static int initialize_nocpsch(struct device_queue_manager *dqm) @@ -1016,7 +1007,7 @@ static int start_nocpsch(struct device_queue_manager *dqm) pr_info("SW scheduler is used"); init_interrupts(dqm); - if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + if (dqm->dev->adev->asic_type == CHIP_HAWAII) return pm_init(&dqm->packet_mgr, dqm); dqm->sched_running = true; @@ -1025,7 +1016,7 @@ static int start_nocpsch(struct device_queue_manager *dqm) static int stop_nocpsch(struct device_queue_manager *dqm) { - if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + if (dqm->dev->adev->asic_type == CHIP_HAWAII) pm_uninit(&dqm->packet_mgr, false); dqm->sched_running = false; @@ -1054,9 +1045,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, dqm->sdma_bitmap &= ~(1ULL << bit); q->sdma_id = bit; q->properties.sdma_engine_id = q->sdma_id % - get_num_sdma_engines(dqm); + kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / - get_num_sdma_engines(dqm); + kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { if (dqm->xgmi_sdma_bitmap == 0) { pr_err("No more XGMI SDMA queue to allocate\n"); @@ -1071,10 +1062,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * assumes the first N engines are always * PCIe-optimized ones */ - q->properties.sdma_engine_id = get_num_sdma_engines(dqm) + - q->sdma_id % get_num_xgmi_sdma_engines(dqm); + q->properties.sdma_engine_id = + kfd_get_num_sdma_engines(dqm->dev) + + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / - get_num_xgmi_sdma_engines(dqm); + kfd_get_num_xgmi_sdma_engines(dqm->dev); } pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id); @@ -1131,7 +1123,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) res.queue_mask |= 1ull << amdgpu_queue_mask_bit_to_set_resource_bit( - (struct amdgpu_device *)dqm->dev->kgd, i); + dqm->dev->adev, i); } res.gws_mask = ~0ull; res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0; @@ -1225,6 +1217,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) bool hanging; dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + if (!dqm->is_hws_hang) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); hanging = dqm->is_hws_hang || dqm->is_resetting; @@ -1429,7 +1426,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->sched_running) return 0; - if (dqm->is_hws_hang) + if (dqm->is_hws_hang || dqm->is_resetting) return -EIO; if (!dqm->active_runlist) return retval; @@ -1844,7 +1841,7 @@ static int 
allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) dev->device_info->num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; - retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size, + retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), (void *)&(mem_obj->cpu_ptr), false); @@ -1861,7 +1858,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (!dqm) return NULL; - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { /* HWS is not available on Hawaii. */ case CHIP_HAWAII: /* HWS depends on CWSR for timely dequeue. CWSR is not @@ -1924,7 +1921,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) goto out_free; } - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { case CHIP_CARRIZO: device_queue_manager_init_vi(&dqm->asic_ops); break; @@ -1946,31 +1943,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) device_queue_manager_init_vi_tonga(&dqm->asic_ops); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - device_queue_manager_init_v9(&dqm->asic_ops); - break; - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - device_queue_manager_init_v10_navi10(&dqm->asic_ops); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - goto out_free; + if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) + device_queue_manager_init_v10_navi10(&dqm->asic_ops); + else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) + device_queue_manager_init_v9(&dqm->asic_ops); + else { + WARN(1, "Unexpected ASIC family %u", + dev->adev->asic_type); + goto out_free; + } } if (init_mqd_managers(dqm)) @@ -1994,7 +1976,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, { WARN(!mqd, "No hiq sdma mqd trunk to free"); - amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem); } void device_queue_manager_uninit(struct device_queue_manager *dqm) @@ -2025,7 +2007,7 @@ static void kfd_process_hw_exception(struct work_struct *work) { struct device_queue_manager *dqm = container_of(work, struct device_queue_manager, hw_exception_work); - amdgpu_amdkfd_gpu_reset(dqm->dev->kgd); + amdgpu_amdkfd_gpu_reset(dqm->dev->adev); } #if defined(CONFIG_DEBUG_FS) @@ -2064,7 +2046,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) return 0; } - r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); if (!r) { @@ -2086,7 +2068,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) continue; r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->kgd, pipe, queue, &dump, &n_regs); + dqm->dev->adev, pipe, queue, &dump, &n_regs); if (r) break; @@ -2103,7 +2085,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) queue < dqm->dev->device_info->num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( - dqm->dev->kgd, pipe, queue, &dump, &n_regs); + dqm->dev->adev, pipe, queue, &dump, &n_regs); if (r) break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c8719682c4da..499fc0ea387f 100644 
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -93,7 +93,7 @@ struct device_queue_manager_ops { struct queue *q); int (*update_queue)(struct device_queue_manager *dqm, - struct queue *q); + struct queue *q, struct mqd_update_info *minfo); int (*register_process)(struct device_queue_manager *dqm, struct qcm_process_device *qpd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index b5c3d13643f1..f20434d9980e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm, SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (dqm->dev->device_info->asic_family == CHIP_ALDEBARAN) { + if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) { /* Aldebaran can safely support different XNACK modes * per process */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 3eea4edee355..afe72dd11325 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -935,8 +935,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid, /* Workaround on Raven to not kill the process when memory is freed * before IOMMU is able to finish processing all the excessive PPRs */ - if (dev->device_info->asic_family != CHIP_RAVEN && - dev->device_info->asic_family != CHIP_RENOIR) { + + if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) { mutex_lock(&p->event_mutex); /* Lookup events by type and signal them */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 2e86692def19..2e2b7ceb71db 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -308,7 +308,7 @@ * 16MB are reserved for kernel use (CWSR trap handler and kernel IB * for now). 
*/ -#define SVM_USER_BASE 0x1000000ull +#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE) #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE) #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE) @@ -394,7 +394,7 @@ int kfd_init_apertures(struct kfd_process *process) pdd->gpuvm_base = pdd->gpuvm_limit = 0; pdd->scratch_base = pdd->scratch_limit = 0; } else { - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_CARRIZO: @@ -406,29 +406,14 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_VEGAM: kfd_init_apertures_vi(pdd, id); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - kfd_init_apertures_v9(pdd, id); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - return -EINVAL; + if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) + kfd_init_apertures_v9(pdd, id); + else { + WARN(1, "Unexpected ASIC family %u", + dev->adev->asic_type); + return -EINVAL; + } } if (!dev->use_iommu_v2) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 12d91e53556c..20512a4e9a91 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -231,7 +231,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { kfd_signal_poison_consumed_event(dev, pasid); - amdgpu_amdkfd_gpu_reset(dev->kgd); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); return; } break; @@ -253,7 +253,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); } else if (source_id == SOC15_INTSRC_SDMA_ECC) { kfd_signal_poison_consumed_event(dev, pasid); - amdgpu_amdkfd_gpu_reset(dev->kgd); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); return; } } else if (client_id == SOC15_IH_CLIENTID_VMC || diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index a2b77d1df854..406479a369a9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -91,7 +91,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->pq_gpu_addr = kq->pq->gpu_addr; /* For CIK family asics, kq->eop_mem is not needed */ - if (dev->device_info->asic_family > CHIP_MULLINS) { + if (dev->adev->asic_type > CHIP_MULLINS) { retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); if (retval != 0) goto err_eop_allocate_vidmem; @@ -136,7 +136,6 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr; prop.eop_ring_buffer_address = kq->eop_gpu_addr; prop.eop_ring_buffer_size = PAGE_SIZE; - prop.cu_mask = NULL; if (init_queue(&kq->queue, &prop) != 0) goto err_init_queue; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index dab290a4d19d..d84cec0022b1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ 
-20,7 +20,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ - #include <linux/types.h> #include <linux/hmm.h> #include <linux/dma-direction.h> @@ -34,6 +33,11 @@ #include "kfd_svm.h" #include "kfd_migrate.h" +#ifdef dev_fmt +#undef dev_fmt +#endif +#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__ + static uint64_t svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr) { @@ -151,14 +155,14 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys, gart_d = svm_migrate_direct_mapping_addr(adev, *vram); } if (r) { - pr_debug("failed %d to create gart mapping\n", r); + dev_err(adev->dev, "fail %d create gart mapping\n", r); goto out_unlock; } r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE, NULL, &next, false, true, false); if (r) { - pr_debug("failed %d to copy memory\n", r); + dev_err(adev->dev, "fail %d to copy memory\n", r); goto out_unlock; } @@ -264,6 +268,32 @@ static void svm_migrate_put_sys_page(unsigned long addr) put_page(page); } +static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate) +{ + unsigned long cpages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + migrate->src[i] & MIGRATE_PFN_MIGRATE) + cpages++; + } + return cpages; +} + +static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) +{ + unsigned long upages = 0; + unsigned long i; + + for (i = 0; i < migrate->npages; i++) { + if (migrate->src[i] & MIGRATE_PFN_VALID && + !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) + upages++; + } + return upages; +} + static int svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, @@ -285,7 +315,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, r = svm_range_vram_node_new(adev, prange, true); if (r) { - pr_debug("failed %d get 0x%llx pages from vram\n", r, npages); + dev_err(adev->dev, "fail %d to alloc vram\n", r); goto out; } @@ -300,12 +330,11 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]); svm_migrate_get_vram_page(prange, migrate->dst[i]); migrate->dst[i] = migrate_pfn(migrate->dst[i]); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_TO_DEVICE); r = dma_mapping_error(dev, src[i]); if (r) { - pr_debug("failed %d dma_map_page\n", r); + dev_err(adev->dev, "fail %d dma_map_page\n", r); goto out_free_vram_pages; } } else { @@ -325,8 +354,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, continue; } - pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n", - src[i] >> PAGE_SHIFT, page_to_pfn(spage)); + pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n", + src[i] >> PAGE_SHIFT, page_to_pfn(spage)); if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) { r = svm_migrate_copy_memory_gart(adev, src + i - j, @@ -372,7 +401,7 @@ out: return r; } -static int +static long svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end) @@ -381,6 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate; + unsigned long cpages = 0; dma_addr_t *scratch; size_t size; void *buf; @@ 
-405,23 +435,31 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, r = migrate_vma_setup(&migrate); if (r) { - pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange->start, prange->last); + dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r, + prange->start, prange->last); goto out_free; } - if (migrate.cpages != npages) { - pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n", - migrate.cpages, - npages); - } - if (migrate.cpages) { - r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, - scratch); - migrate_vma_pages(&migrate); - svm_migrate_copy_done(adev, mfence); - migrate_vma_finalize(&migrate); + cpages = migrate.cpages; + if (!cpages) { + pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n", + prange->start, prange->last); + goto out_free; } + if (cpages != npages) + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", + cpages, npages); + else + pr_debug("0x%lx pages migrated\n", cpages); + + r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch); + migrate_vma_pages(&migrate); + + pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + svm_migrate_successful_pages(&migrate), cpages, migrate.npages); + + svm_migrate_copy_done(adev, mfence); + migrate_vma_finalize(&migrate); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -429,12 +467,13 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, out_free: kvfree(buf); out: - if (!r) { + if (!r && cpages) { pdd = svm_range_get_pdd_by_adev(prange, adev); if (pdd) - WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages); - } + WRITE_ONCE(pdd->page_in, pdd->page_in + cpages); + return cpages; + } return r; } @@ -456,7 +495,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, unsigned long addr, start, end; struct vm_area_struct *vma; struct amdgpu_device *adev; - int r = 0; + unsigned long cpages = 0; + long r = 0; if (prange->actual_loc == best_loc) { pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n", @@ -488,17 +528,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, next = min(vma->vm_end, end); r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next); - if (r) { - pr_debug("failed to migrate\n"); + if (r < 0) { + pr_debug("failed %ld to migrate\n", r); break; + } else { + cpages += r; } addr = next; } - if (!r) + if (cpages) prange->actual_loc = best_loc; - return r; + return r < 0 ? 
r : 0; } static void svm_migrate_page_free(struct page *page) @@ -506,7 +548,7 @@ static void svm_migrate_page_free(struct page *page) struct svm_range_bo *svm_bo = page->zone_device_data; if (svm_bo) { - pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref)); + pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref)); svm_range_bo_unref(svm_bo); } } @@ -572,15 +614,14 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE); r = dma_mapping_error(dev, dst[i]); if (r) { - pr_debug("failed %d dma_map_page\n", r); + dev_err(adev->dev, "fail %d dma_map_page\n", r); goto out_oom; } - pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n", - dst[i] >> PAGE_SHIFT, page_to_pfn(dpage)); + pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n", + dst[i] >> PAGE_SHIFT, page_to_pfn(dpage)); migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)); - migrate->dst[i] |= MIGRATE_PFN_LOCKED; j++; } @@ -599,11 +640,13 @@ out_oom: return r; } -static int +static long svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, uint64_t end) { uint64_t npages = (end - start) >> PAGE_SHIFT; + unsigned long upages = npages; + unsigned long cpages = 0; struct kfd_process_device *pdd; struct dma_fence *mfence = NULL; struct migrate_vma migrate; @@ -631,36 +674,47 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, r = migrate_vma_setup(&migrate); if (r) { - pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n", - r, prange->svms, prange->start, prange->last); + dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r, + prange->start, prange->last); goto out_free; } - pr_debug("cpages %ld\n", migrate.cpages); - - if (migrate.cpages) { - r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, - scratch, npages); - migrate_vma_pages(&migrate); - svm_migrate_copy_done(adev, mfence); - migrate_vma_finalize(&migrate); - } else { + cpages = migrate.cpages; + if (!cpages) { pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n", prange->start, prange->last); + upages = svm_migrate_unsuccessful_pages(&migrate); + goto out_free; } + if (cpages != npages) + pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n", + cpages, npages); + else + pr_debug("0x%lx pages migrated\n", cpages); + r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence, + scratch, npages); + migrate_vma_pages(&migrate); + + upages = svm_migrate_unsuccessful_pages(&migrate); + pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n", + upages, cpages, migrate.npages); + + svm_migrate_copy_done(adev, mfence); + migrate_vma_finalize(&migrate); svm_range_dma_unmap(adev->dev, scratch, 0, npages); out_free: kvfree(buf); out: - if (!r) { + if (!r && cpages) { pdd = svm_range_get_pdd_by_adev(prange, adev); if (pdd) - WRITE_ONCE(pdd->page_out, - pdd->page_out + migrate.cpages); + WRITE_ONCE(pdd->page_out, pdd->page_out + cpages); + + return upages; } - return r; + return r ? 
r : upages; } /** @@ -680,7 +734,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) unsigned long addr; unsigned long start; unsigned long end; - int r = 0; + unsigned long upages = 0; + long r = 0; if (!prange->actual_loc) { pr_debug("[0x%lx 0x%lx] already migrated to ram\n", @@ -711,18 +766,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm) next = min(vma->vm_end, end); r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next); - if (r) { - pr_debug("failed %d to migrate\n", r); + if (r < 0) { + pr_debug("failed %ld to migrate\n", r); break; + } else { + upages += r; } addr = next; } - if (!r) { + if (!upages) { svm_range_vram_node_free(prange); prange->actual_loc = 0; } - return r; + + return r < 0 ? r : 0; } /** @@ -740,7 +798,7 @@ static int svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, struct mm_struct *mm) { - int r; + int r, retries = 3; /* * TODO: for both devices with PCIe large bar or on same xgmi hive, skip @@ -749,9 +807,14 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc, pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); - r = svm_migrate_vram_to_ram(prange, mm); - if (r) - return r; + do { + r = svm_migrate_vram_to_ram(prange, mm); + if (r) + return r; + } while (prange->actual_loc && --retries); + + if (prange->actual_loc) + return -EDEADLK; return svm_migrate_ram_to_vram(prange, best_loc, mm); } @@ -796,6 +859,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf) pr_debug("failed find process at fault address 0x%lx\n", addr); return VM_FAULT_SIGBUS; } + if (READ_ONCE(p->svms.faulting_task) == current) { + pr_debug("skipping ram migration\n"); + kfd_unref_process(p); + return 0; + } addr >>= PAGE_SHIFT; pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr); @@ -870,7 +938,7 @@ int svm_migrate_init(struct amdgpu_device *adev) void *r; /* Page migration works on Vega10 or newer */ - if (kfddev->device_info->asic_family < CHIP_VEGA10) + if (!KFD_IS_SOC15(kfddev)) return -EINVAL; pgmap = &kfddev->pgmap; @@ -891,11 +959,17 @@ int svm_migrate_init(struct amdgpu_device *adev) pgmap->ops = &svm_migrate_pgmap_ops; pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev); pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; + + /* Device manager releases device-specific resources, memory region and + * pgmap when driver disconnects from device. 
+ */ r = devm_memremap_pages(adev->dev, pgmap); if (IS_ERR(r)) { pr_err("failed to register HMM device memory\n"); - devm_release_mem_region(adev->dev, res->start, - res->end - res->start + 1); + + /* Disable SVM support capability */ + pgmap->type = 0; + devm_release_mem_region(adev->dev, res->start, resource_size(res)); return PTR_ERR(r); } @@ -908,12 +982,3 @@ int svm_migrate_init(struct amdgpu_device *adev) return 0; } - -void svm_migrate_fini(struct amdgpu_device *adev) -{ - struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap; - - devm_memunmap_pages(adev->dev, pgmap); - devm_release_mem_region(adev->dev, pgmap->range.start, - pgmap->range.end - pgmap->range.start + 1); -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 0de76b5d4973..2f5b3394c9ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -47,7 +47,6 @@ unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); int svm_migrate_init(struct amdgpu_device *adev); -void svm_migrate_fini(struct amdgpu_device *adev); #else @@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev) { return 0; } -static inline void svm_migrate_fini(struct amdgpu_device *adev) -{ - /* empty */ -} #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index c021519af810..7b4118915bf6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -100,7 +100,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, struct kfd_cu_info cu_info; uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0}; int i, se, sh, cu; - amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info); if (cu_mask_count > cu_info.cu_active_number) cu_mask_count = cu_info.cu_active_number; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 6e6918ccedfd..965e17c5dbb4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -80,7 +80,8 @@ struct mqd_manager { struct mm_struct *mms); void (*update_mqd)(struct mqd_manager *mm, void *mqd, - struct queue_properties *q); + struct queue_properties *q, + struct mqd_update_info *minfo); int (*destroy_mqd)(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 064914e1e8d6..e9a8e21e144e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -42,16 +42,17 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct cik_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -135,7 +136,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + 
mm->update_mqd(mm, m, q, NULL); } static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, @@ -152,7 +153,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) *gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static void free_mqd(struct mqd_manager *mm, void *mqd, @@ -170,7 +171,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, wptr_mask, mms); } @@ -179,13 +180,14 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } static void __update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, unsigned int atc_bit) + struct queue_properties *q, struct mqd_update_info *minfo, + unsigned int atc_bit) { struct cik_mqd *m; @@ -214,16 +216,17 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, if (q->format == KFD_QUEUE_FORMAT_AQL) m->cp_hqd_pq_control |= NO_UPDATE_RPTR; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); } static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, 1); + __update_mqd(mm, mqd, q, minfo, 1); } static uint32_t read_doorbell_id(void *mqd) @@ -234,13 +237,15 @@ static uint32_t read_doorbell_id(void *mqd) } static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, 0); + __update_mqd(mm, mqd, q, minfo, 0); } static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct cik_sdma_rlc_registers *m; @@ -271,7 +276,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout, + return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -284,7 +289,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied(struct mqd_manager *mm, void *mqd, @@ -292,7 +297,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, + return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -301,7 +306,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return 
mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } /* @@ -318,7 +323,8 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, } static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct cik_mqd *m; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index c7fb59ca597f..d74d8a6ac27a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -42,16 +42,17 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct v10_compute_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -136,7 +137,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -147,7 +148,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); - r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms); return r; @@ -157,12 +158,13 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, queue_id, p->doorbell_off); } static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v10_compute_mqd *m; @@ -218,7 +220,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, if (mm->dev->cwsr_enabled) m->cp_hqd_ctx_save_control = 0; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); @@ -237,7 +239,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -252,7 +254,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -311,14 +313,14 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) *gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return 
mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -326,7 +328,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, #define SDMA_RLC_DUMMY_DEFAULT 0xf static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v10_sdma_mqd *m; @@ -360,14 +363,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 7f4e102ff4bd..326eb2285029 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -43,16 +43,17 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) } static void update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct v9_mqd *m; uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -107,7 +108,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, + retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev, ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &(mqd_mem_obj->gtt_mem), @@ -188,7 +189,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -198,7 +199,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms); } @@ -207,12 +208,13 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, queue_id, p->doorbell_off); } static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v9_mqd *m; @@ -269,7 +271,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) m->cp_hqd_ctx_save_control = 0; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); @@ -289,7 +291,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -299,7 +301,7 @@ static void free_mqd(struct mqd_manager *mm, void *mqd, struct kfd_dev *kfd = mm->dev; if (mqd_mem_obj->gtt_mem) { - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, mqd_mem_obj->gtt_mem); kfree(mqd_mem_obj); } else { kfd_gtt_sa_free(mm->dev, mqd_mem_obj); @@ -311,7 +313,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -366,14 +368,14 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) *gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -381,7 +383,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, #define SDMA_RLC_DUMMY_DEFAULT 0xf static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct v9_sdma_mqd *m; @@ -415,14 +418,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 33dbd22d290f..d456e950ce1d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -45,16 +45,17 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) } static void 
update_cu_mask(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct mqd_update_info *minfo) { struct vi_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (q->cu_mask_count == 0) + if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || + !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, - q->cu_mask, q->cu_mask_count, se_mask); + minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; @@ -150,7 +151,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -161,14 +162,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, wptr_mask, mms); } static void __update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q, unsigned int mtype, - unsigned int atc_bit) + struct queue_properties *q, struct mqd_update_info *minfo, + unsigned int mtype, unsigned int atc_bit) { struct vi_mqd *m; @@ -230,7 +231,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT | mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT; - update_cu_mask(mm, mqd, q); + update_cu_mask(mm, mqd, minfo); set_priority(m, q); q->is_active = QUEUE_IS_ACTIVE(*q); @@ -238,9 +239,10 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, static void update_mqd(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, MTYPE_CC, 1); + __update_mqd(mm, mqd, q, minfo, MTYPE_CC, 1); } static uint32_t read_doorbell_id(void *mqd) @@ -251,9 +253,10 @@ static uint32_t read_doorbell_id(void *mqd) } static void update_mqd_tonga(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, MTYPE_UC, 0); + __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0); } static int destroy_mqd(struct mqd_manager *mm, void *mqd, @@ -262,7 +265,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -277,7 +280,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -317,9 +320,10 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, } static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { - __update_mqd(mm, mqd, q, MTYPE_UC, 0); + __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0); } static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, @@ -336,20 +340,21 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, if (gart_addr) *gart_addr = mqd_mem_obj->gpu_addr; - mm->update_mqd(mm, m, q); + mm->update_mqd(mm, m, q, NULL); } static int load_mqd_sdma(struct 
mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, - struct queue_properties *q) + struct queue_properties *q, + struct mqd_update_info *minfo) { struct vi_sdma_mqd *m; @@ -384,14 +389,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index e547f1f8c49f..1439420925a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -223,7 +223,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) { - switch (dqm->dev->device_info->asic_family) { + switch (dqm->dev->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: /* PM4 packet structures on CIK are the same as on VI */ @@ -236,31 +236,16 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_VEGAM: pm->pmf = &kfd_vi_pm_funcs; break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - pm->pmf = &kfd_v9_pm_funcs; - break; - case CHIP_ALDEBARAN: - pm->pmf = &kfd_aldebaran_pm_funcs; - break; default: - WARN(1, "Unexpected ASIC family %u", - dqm->dev->device_info->asic_family); - return -EINVAL; + if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) + pm->pmf = &kfd_aldebaran_pm_funcs; + else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1)) + pm->pmf = &kfd_v9_pm_funcs; + else { + WARN(1, "Unexpected ASIC family %u", + dqm->dev->adev->asic_type); + return -EINVAL; + } } pm->dqm = dqm; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index ab83b0de6b22..7ea528941951 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -183,7 +183,8 @@ enum cache_policy { cache_policy_noncoherent }; -#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10) +#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0]) +#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1))) struct kfd_event_interrupt_class { bool (*interrupt_isr)(struct kfd_dev *dev, @@ -194,7 +195,6 @@ struct kfd_event_interrupt_class { }; struct kfd_device_info { - enum amd_asic_type asic_family; const char *asic_name; uint32_t gfx_target_version; const struct kfd_event_interrupt_class *event_interrupt_class; @@ -207,11 +207,13 @@ struct kfd_device_info { bool supports_cwsr; bool needs_iommu_device; 
bool needs_pci_atomics; - unsigned int num_sdma_engines; - unsigned int num_xgmi_sdma_engines; + uint32_t no_atomic_fw_version; unsigned int num_sdma_queues_per_engine; }; +unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev); +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev); + struct kfd_mem_obj { uint32_t range_start; uint32_t range_end; @@ -227,7 +229,7 @@ struct kfd_vmid_info { }; struct kfd_dev { - struct kgd_dev *kgd; + struct amdgpu_device *adev; const struct kfd_device_info *device_info; struct pci_dev *pdev; @@ -471,9 +473,6 @@ struct queue_properties { uint32_t ctl_stack_size; uint64_t tba_addr; uint64_t tma_addr; - /* Relevant for CU */ - uint32_t cu_mask_count; /* Must be a multiple of 32 */ - uint32_t *cu_mask; }; #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ @@ -481,6 +480,20 @@ struct queue_properties { (q).queue_percent > 0 && \ !(q).is_evicted) +enum mqd_update_flag { + UPDATE_FLAG_CU_MASK = 0, +}; + +struct mqd_update_info { + union { + struct { + uint32_t count; /* Must be a multiple of 32 */ + uint32_t *ptr; + } cu_mask; + }; + enum mqd_update_flag update_flag; +}; + /** * struct queue * @@ -607,12 +620,14 @@ struct qcm_process_device { uint32_t sh_hidden_private_base; /* CWSR memory */ + struct kgd_mem *cwsr_mem; void *cwsr_kaddr; uint64_t cwsr_base; uint64_t tba_addr; uint64_t tma_addr; /* IB memory */ + struct kgd_mem *ib_mem; uint64_t ib_base; void *ib_kaddr; @@ -752,8 +767,10 @@ struct svm_range_list { struct list_head deferred_range_list; spinlock_t deferred_list_lock; atomic_t evicted_ranges; + atomic_t drain_pagefaults; struct delayed_work restore_work; DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); + struct task_struct *faulting_task; }; /* Process data */ @@ -807,6 +824,7 @@ struct kfd_process { /* Event ID allocator and lookup */ struct idr event_idr; /* Event page */ + u64 signal_handle; struct kfd_signal_page *signal_page; size_t signal_mapped_size; size_t signal_event_count; @@ -874,7 +892,7 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id); -int kfd_process_gpuid_from_kgd(struct kfd_process *p, +int kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, uint32_t *gpuid, uint32_t *gpuidx); static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p, @@ -967,7 +985,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd); +struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev); int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); @@ -1030,10 +1048,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, unsigned int *qid, uint32_t *p_doorbell_offset_in_process); int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid); -int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, - struct queue_properties *p); -int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid, +int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid, struct queue_properties *p); 
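For reference: pqm_set_cu_mask() above is folded into the more general pqm_update_mqd(), declared just below, which carries a struct mqd_update_info (introduced later in this hunk) down through update_queue() into the MQD managers. A minimal sketch of a caller for the new path; the helper name and error handling are illustrative assumptions, not part of this patch:

static int set_queue_cu_mask(struct process_queue_manager *pqm,
			     unsigned int qid,
			     uint32_t *mask_bits, uint32_t mask_count)
{
	struct mqd_update_info minfo = {0};

	/* cu_mask.count must be a multiple of 32, per the struct comment */
	if (mask_count % 32)
		return -EINVAL;

	minfo.update_flag = UPDATE_FLAG_CU_MASK;
	minfo.cu_mask.count = mask_count;
	minfo.cu_mask.ptr = mask_bits;

	/* update_cu_mask() in the MQD managers only acts when the flag and
	 * pointer are both set; init_mqd() and pqm_update_queue_properties()
	 * pass a NULL minfo and therefore leave the CU mask untouched.
	 */
	return pqm_update_mqd(pqm, qid, &minfo);
}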
+int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid, + struct mqd_update_info *minfo); int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, void *gws); struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 21ec8a18cad2..d4c8a6948a9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -72,6 +72,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep); static void evict_process_worker(struct work_struct *work); static void restore_process_worker(struct work_struct *work); +static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd); + struct kfd_procfs_tree { struct kobject *kobj; }; @@ -286,7 +288,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Collect wave count from device if it supports */ wave_cnt = 0; max_waves_per_cu = 0; - dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt, + dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt, &max_waves_per_cu); /* Translate wave count to number of compute units */ @@ -685,12 +687,17 @@ void kfd_process_destroy_wq(void) } static void kfd_process_free_gpuvm(struct kgd_mem *mem, - struct kfd_process_device *pdd) + struct kfd_process_device *pdd, void *kptr) { struct kfd_dev *dev = pdd->dev; - amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv); - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv, + if (kptr) { + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->adev, mem); + kptr = NULL; + } + + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv, NULL); } @@ -702,63 +709,46 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem, */ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, uint64_t gpu_va, uint32_t size, - uint32_t flags, void **kptr) + uint32_t flags, struct kgd_mem **mem, void **kptr) { struct kfd_dev *kdev = pdd->dev; - struct kgd_mem *mem = NULL; - int handle; int err; - err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, - pdd->drm_priv, &mem, NULL, flags); + err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size, + pdd->drm_priv, mem, NULL, flags); if (err) goto err_alloc_mem; - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem, pdd->drm_priv, NULL); if (err) goto err_map_mem; - err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true); + err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; } - /* Create an obj handle so kfd_process_device_remove_obj_handle - * will take care of the bo removal when the process finishes. - * We do not need to take p->mutex, because the process is just - * created and the ioctls have not had the chance to run. 
- */ - handle = kfd_process_device_create_obj_handle(pdd, mem); - - if (handle < 0) { - err = handle; - goto free_gpuvm; - } - if (kptr) { - err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd, - (struct kgd_mem *)mem, kptr, NULL); + err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->adev, + (struct kgd_mem *)*mem, kptr, NULL); if (err) { pr_debug("Map GTT BO to kernel failed\n"); - goto free_obj_handle; + goto sync_memory_failed; } } return err; -free_obj_handle: - kfd_process_device_remove_obj_handle(pdd, handle); -free_gpuvm: sync_memory_failed: - kfd_process_free_gpuvm(mem, pdd); - return err; + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv); err_map_mem: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv, NULL); err_alloc_mem: + *mem = NULL; *kptr = NULL; return err; } @@ -776,6 +766,7 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; + struct kgd_mem *mem; void *kaddr; int ret; @@ -784,15 +775,26 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd) /* ib_base is only set for dGPU */ ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags, - &kaddr); + &mem, &kaddr); if (ret) return ret; + qpd->ib_mem = mem; qpd->ib_kaddr = kaddr; return 0; } +static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd) +{ + struct qcm_process_device *qpd = &pdd->qpd; + + if (!qpd->ib_kaddr || !qpd->ib_base) + return; + + kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr); +} + struct kfd_process *kfd_create_process(struct file *filep) { struct kfd_process *process; @@ -938,15 +940,46 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) if (!peer_pdd->drm_priv) continue; amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - peer_pdd->dev->kgd, mem, peer_pdd->drm_priv); + peer_pdd->dev->adev, mem, peer_pdd->drm_priv); } - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem, pdd->drm_priv, NULL); kfd_process_device_remove_obj_handle(pdd, id); } } +/* + * Just kunmap and unpin signal BO here. 
It will be freed in + * kfd_process_free_outstanding_kfd_bos() + */ +static void kfd_process_kunmap_signal_bo(struct kfd_process *p) +{ + struct kfd_process_device *pdd; + struct kfd_dev *kdev; + void *mem; + + kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle)); + if (!kdev) + return; + + mutex_lock(&p->mutex); + + pdd = kfd_get_process_device_data(kdev, p); + if (!pdd) + goto out; + + mem = kfd_process_device_translate_handle( + pdd, GET_IDR_HANDLE(p->signal_handle)); + if (!mem) + goto out; + + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->adev, mem); + +out: + mutex_unlock(&p->mutex); +} + static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p) { int i; @@ -965,9 +998,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", pdd->dev->id, p->pasid); + kfd_process_device_destroy_cwsr_dgpu(pdd); + kfd_process_device_destroy_ib_mem(pdd); + if (pdd->drm_file) { amdgpu_amdkfd_gpuvm_release_process_vm( - pdd->dev->kgd, pdd->drm_priv); + pdd->dev->adev, pdd->drm_priv); fput(pdd->drm_file); } @@ -1049,9 +1085,11 @@ static void kfd_process_wq_release(struct work_struct *work) { struct kfd_process *p = container_of(work, struct kfd_process, release_work); + kfd_process_remove_sysfs(p); kfd_iommu_unbind_process(p); + kfd_process_kunmap_signal_bo(p); kfd_process_free_outstanding_kfd_bos(p); svm_range_list_fini(p); @@ -1198,6 +1236,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE; + struct kgd_mem *mem; void *kaddr; int ret; @@ -1206,10 +1245,11 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) /* cwsr_base is only set for dGPU */ ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base, - KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr); + KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr); if (ret) return ret; + qpd->cwsr_mem = mem; qpd->cwsr_kaddr = kaddr; qpd->tba_addr = qpd->cwsr_base; @@ -1222,6 +1262,17 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd) return 0; } +static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd) +{ + struct kfd_dev *dev = pdd->dev; + struct qcm_process_device *qpd = &pdd->qpd; + + if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base) + return; + + kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr); +} + void kfd_process_set_trap_handler(struct qcm_process_device *qpd, uint64_t tba_addr, uint64_t tma_addr) @@ -1266,14 +1317,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * support the SVM APIs and don't need to be considered * for the XNACK mode selection. */ - if (dev->device_info->asic_family < CHIP_VEGA10) + if (!KFD_IS_SOC15(dev)) continue; /* Aldebaran can always support XNACK because it can support * per-process XNACK mode selection. But let the dev->noretry * setting still influence the default XNACK mode. */ - if (supported && - dev->device_info->asic_family == CHIP_ALDEBARAN) + if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) continue; /* GFXv10 and later GPUs do not support shader preemption @@ -1281,7 +1331,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * management and memory-manager-related preemptions or * even deadlocks. 
*/ - if (dev->device_info->asic_family >= CHIP_NAVI10) + if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) return false; if (dev->noretry) @@ -1380,7 +1430,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, int range_start = dev->shared_resources.non_cp_doorbells_start; int range_end = dev->shared_resources.non_cp_doorbells_end; - if (!KFD_IS_SOC15(dev->device_info->asic_family)) + if (!KFD_IS_SOC15(dev)) return 0; qpd->doorbell_bitmap = @@ -1496,7 +1546,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, dev = pdd->dev; ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( - dev->kgd, drm_file, p->pasid, + dev->adev, drm_file, p->pasid, &p->kgd_process_info, &p->ef); if (ret) { pr_err("Failed to create process VM object\n"); @@ -1664,7 +1714,11 @@ int kfd_process_evict_queues(struct kfd_process *p) r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, &pdd->qpd); - if (r) { + /* eviction returns -EIO if HWS is hung or the ASIC is resetting; in that + * case we still set all queues to the evicted state, to keep them from + * being added back while their contents have not actually been saved. + */ + if (r && r != -EIO) { pr_err("Failed to evict process queues\n"); goto fail; } @@ -1724,14 +1778,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) } int -kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev, +kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, uint32_t *gpuid, uint32_t *gpuidx) { - struct kgd_dev *kgd = (struct kgd_dev *)adev; int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) { + if (p->pdds[i] && p->pdds[i]->dev->adev == adev) { *gpuid = p->pdds[i]->dev->id; *gpuidx = i; return 0; @@ -1896,10 +1949,10 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) * only happens when the first queue is created. */ if (pdd->qpd.vmid) - amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd, + amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, pdd->qpd.vmid); } else { - amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd, + amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, pdd->process->pasid, type); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 243dd1efcdbf..4f8464658daf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -118,10 +118,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, return ret; pqn->q->gws = mem; - pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0; + pdd->qpd.num_gws = gws ?
dev->adev->gds.gws_size : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q); + pqn->q, NULL); } void kfd_process_dequeue_from_all_devices(struct kfd_process *p) @@ -394,8 +394,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) pdd->qpd.num_gws = 0; } - kfree(pqn->q->properties.cu_mask); - pqn->q->properties.cu_mask = NULL; uninit_queue(pqn->q); } @@ -411,8 +409,8 @@ err_destroy_queue: return retval; } -int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, - struct queue_properties *p) +int pqm_update_queue_properties(struct process_queue_manager *pqm, + unsigned int qid, struct queue_properties *p) { int retval; struct process_queue_node *pqn; @@ -429,15 +427,15 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid, pqn->q->properties.priority = p->priority; retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q); + pqn->q, NULL); if (retval != 0) return retval; return 0; } -int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid, - struct queue_properties *p) +int pqm_update_mqd(struct process_queue_manager *pqm, + unsigned int qid, struct mqd_update_info *minfo) { int retval; struct process_queue_node *pqn; @@ -448,16 +446,8 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid, return -EFAULT; } - /* Free the old CU mask memory if it is already allocated, then - * allocate memory for the new CU mask. - */ - kfree(pqn->q->properties.cu_mask); - - pqn->q->properties.cu_mask_count = p->cu_mask_count; - pqn->q->properties.cu_mask = p->cu_mask; - retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, - pqn->q); + pqn->q, minfo); if (retval != 0) return retval; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index ed4bc5f844ce..deae12dc777d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -207,7 +207,6 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, uint64_t throttle_bitmask) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; /* * ThermalThrottle msg = throttle_bitmask(8): * thermal_interrupt_count(16): @@ -223,14 +222,13 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n", KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, - atomic64_read(&adev->smu.throttle_int_counter)); + atomic64_read(&dev->adev->smu.throttle_int_counter)); add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); } void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; struct amdgpu_task_info task_info; /* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */ /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n + @@ -243,7 +241,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) return; memset(&task_info, 0, sizeof(struct amdgpu_task_info)); - amdgpu_vm_get_task_info(adev, pasid, &task_info); + amdgpu_vm_get_task_info(dev->adev, pasid, &task_info); /* Report VM faults from user applications, not retry from kernel */ if (!task_info.pid) return; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9fc8021bb0ab..755265f6c53b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -33,6 +33,11 @@ #include "kfd_svm.h" #include "kfd_migrate.h" +#ifdef dev_fmt +#undef dev_fmt +#endif +#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__ + #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1 /* Long enough to ensure no retry fault comes after svm range is restored and @@ -45,7 +50,9 @@ static bool svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, const struct mmu_notifier_range *range, unsigned long cur_seq); - +static int +svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, + uint64_t *bo_s, uint64_t *bo_l); static const struct mmu_interval_notifier_ops svm_range_mn_ops = { .invalidate = svm_range_cpu_invalidate_pagetables, }; @@ -118,6 +125,13 @@ static void svm_range_remove_notifier(struct svm_range *prange) mmu_interval_notifier_remove(&prange->notifier); } +static bool +svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr) +{ + return dma_addr && !dma_mapping_error(dev, dma_addr) && + !(dma_addr & SVM_RANGE_VRAM_DOMAIN); +} + static int svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, unsigned long offset, unsigned long npages, @@ -139,8 +153,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, addr += offset; for (i = 0; i < npages; i++) { - if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]), - "leaking dma mapping\n")) + if (svm_is_valid_dma_mapping_addr(dev, addr[i])) dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); page = hmm_pfn_to_page(hmm_pfns[i]); @@ -152,17 +165,17 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, bo_adev->vm_manager.vram_base_offset - bo_adev->kfd.dev->pgmap.range.start; addr[i] |= SVM_RANGE_VRAM_DOMAIN; - pr_debug("vram address detected: 0x%llx\n", addr[i]); + pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]); continue; } addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); r = dma_mapping_error(dev, addr[i]); if (r) { - pr_debug("failed %d dma_map_page\n", r); + dev_err(dev, "failed %d dma_map_page\n", r); return r; } - pr_debug("dma mapping 0x%llx for page addr 0x%lx\n", - addr[i] >> PAGE_SHIFT, page_to_pfn(page)); + pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n", + addr[i] >> PAGE_SHIFT, page_to_pfn(page)); } return 0; } @@ -180,7 +193,6 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; pr_debug("mapping to gpu idx 0x%x\n", gpuidx); pdd = kfd_process_device_from_gpuidx(p, gpuidx); @@ -188,9 +200,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = svm_range_dma_map_dev(adev, prange, offset, npages, + r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages, hmm_pfns, gpuidx); if (r) break; @@ -209,9 +220,9 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, return; for (i = offset; i < offset + npages; i++) { - if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i])) + if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i])) continue; - pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); + pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); dma_addr[i] = 0; } @@ -568,7 +579,7 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id) return NULL; } - 
return (struct amdgpu_device *)pdd->dev->kgd; + return pdd->dev->adev; } struct kfd_process_device * @@ -580,7 +591,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev) p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx); if (r) { pr_debug("failed to get device id by adev %p\n", adev); return NULL; @@ -1040,8 +1051,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, if (domain == SVM_RANGE_VRAM_DOMAIN) bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - switch (adev->asic_type) { - case CHIP_ARCTURUS: + switch (KFD_GC_VERSION(adev->kfd.dev)) { + case IP_VERSION(9, 4, 1): if (domain == SVM_RANGE_VRAM_DOMAIN) { if (bo_adev == adev) { mapping_flags |= coherent ? @@ -1057,7 +1068,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): if (domain == SVM_RANGE_VRAM_DOMAIN) { if (bo_adev == adev) { mapping_flags |= coherent ? @@ -1116,7 +1127,6 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); struct kfd_process_device *pdd; struct dma_fence *fence = NULL; - struct amdgpu_device *adev; struct kfd_process *p; uint32_t gpuidx; int r = 0; @@ -1132,9 +1142,9 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + r = svm_range_unmap_from_gpu(pdd->dev->adev, + drm_priv_to_vm(pdd->drm_priv), start, last, &fence); if (r) break; @@ -1146,7 +1156,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, if (r) break; } - amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev, + amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev, p->pasid, TLB_FLUSH_HEAVYWEIGHT); } @@ -1165,7 +1175,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned long last_start; int last_domain; int r = 0; - int64_t i; + int64_t i, j; last_start = prange->start + offset; @@ -1178,7 +1188,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, for (i = offset; i < offset + npages; i++) { last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; - if ((prange->start + i) < prange->last && + + /* Collect all pages in the same address range and memory domain + * that can be mapped with a single call to update mapping. 
+ */ + if (i < offset + npages - 1 && last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN)) continue; @@ -1201,6 +1215,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, NULL, dma_addr, &vm->last_update, &table_freed); + + for (j = last_start - prange->start; j <= i; j++) + dma_addr[j] |= last_domain; + if (r) { pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); goto out; @@ -1222,8 +1240,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct kfd_process *p; p = container_of(prange->svms, struct kfd_process, svms); - amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev, - p->pasid, TLB_FLUSH_LEGACY); + amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY); } out: return r; @@ -1236,7 +1253,6 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, { struct kfd_process_device *pdd; struct amdgpu_device *bo_adev; - struct amdgpu_device *adev; struct kfd_process *p; struct dma_fence *fence = NULL; uint32_t gpuidx; @@ -1255,19 +1271,18 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; pdd = kfd_bind_process_to_device(pdd->dev, p); if (IS_ERR(pdd)) return -EINVAL; - if (bo_adev && adev != bo_adev && - !amdgpu_xgmi_same_hive(adev, bo_adev)) { + if (bo_adev && pdd->dev->adev != bo_adev && + !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { pr_debug("cannot map to device idx %d\n", gpuidx); continue; } - r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv), prange, offset, npages, readonly, prange->dma_addr[gpuidx], bo_adev, wait ? &fence : NULL); @@ -1293,7 +1308,7 @@ struct svm_validate_context { struct svm_range *prange; bool intr; unsigned long bitmap[MAX_GPU_INSTANCE]; - struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1]; + struct ttm_validate_buffer tv[MAX_GPU_INSTANCE]; struct list_head validate_list; struct ww_acquire_ctx ticket; }; @@ -1301,7 +1316,6 @@ struct svm_validate_context { static int svm_range_reserve_bos(struct svm_validate_context *ctx) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; struct amdgpu_vm *vm; uint32_t gpuidx; int r; @@ -1313,18 +1327,12 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; vm = drm_priv_to_vm(pdd->drm_priv); ctx->tv[gpuidx].bo = &vm->root.bo->tbo; ctx->tv[gpuidx].num_shared = 4; list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); } - if (ctx->prange->svm_bo && ctx->prange->ttm_res) { - ctx->tv[MAX_GPU_INSTANCE].bo = &ctx->prange->svm_bo->bo->tbo; - ctx->tv[MAX_GPU_INSTANCE].num_shared = 1; - list_add(&ctx->tv[MAX_GPU_INSTANCE].head, &ctx->validate_list); - } r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list, ctx->intr, NULL); @@ -1340,9 +1348,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) r = -EINVAL; goto unreserve_out; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv), + r = amdgpu_vm_validate_pt_bos(pdd->dev->adev, + drm_priv_to_vm(pdd->drm_priv), svm_range_bo_validate, NULL); if (r) { pr_debug("failed %d validate pt bos\n", r); @@ -1365,12 +1373,10 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx) static void *kfd_svm_page_owner(struct kfd_process 
*p, int32_t gpuidx) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; pdd = kfd_process_device_from_gpuidx(p, gpuidx); - adev = (struct amdgpu_device *)pdd->dev->kgd; - return SVM_ADEV_PGMAP_OWNER(adev); + return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev); } /* @@ -1445,7 +1451,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, /* This should never happen. actual_loc gets set by * svm_migrate_ram_to_vram after allocating a BO. */ - WARN(1, "VRAM BO missing during validation\n"); + WARN_ONCE(1, "VRAM BO missing during validation\n"); return -EINVAL; } @@ -1480,9 +1486,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm, next = min(vma->vm_end, end); npages = (next - addr) >> PAGE_SHIFT; + WRITE_ONCE(p->svms.faulting_task, current); r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, addr, npages, &hmm_range, readonly, true, owner); + WRITE_ONCE(p->svms.faulting_task, NULL); if (r) { pr_debug("failed %d to get svm range pages\n", r); goto unreserve_out; @@ -1538,7 +1546,7 @@ unreserve_out: * Context: Returns with mmap write lock held, pending deferred work flushed * */ -static void +void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm) { @@ -1556,7 +1564,6 @@ retry_flush_work: static void svm_range_restore_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct amdkfd_process_info *process_info; struct svm_range_list *svms; struct svm_range *prange; struct kfd_process *p; @@ -1576,12 +1583,10 @@ static void svm_range_restore_work(struct work_struct *work) * the lifetime of this thread, kfd_process and mm will be valid. */ p = container_of(svms, struct kfd_process, svms); - process_info = p->kgd_process_info; mm = p->mm; if (!mm) return; - mutex_lock(&process_info->lock); svm_range_list_lock_and_flush_work(svms, mm); mutex_lock(&svms->lock); @@ -1634,7 +1639,6 @@ static void svm_range_restore_work(struct work_struct *work) out_reschedule: mutex_unlock(&svms->lock); mmap_write_unlock(mm); - mutex_unlock(&process_info->lock); /* If validation failed, reschedule another attempt */ if (evicted_ranges) { @@ -1948,23 +1952,30 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange) static void svm_range_drain_retry_fault(struct svm_range_list *svms) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; struct kfd_process *p; + int drain; uint32_t i; p = container_of(svms, struct kfd_process, svms); +restart: + drain = atomic_read(&svms->drain_pagefaults); + if (!drain) + return; + for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { pdd = p->pdds[i]; if (!pdd) continue; pr_debug("drain retry fault gpu %d svms %p\n", i, svms); - adev = (struct amdgpu_device *)pdd->dev->kgd; - amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1); + amdgpu_ih_wait_on_checkpoint_process(pdd->dev->adev, + &pdd->dev->adev->irq.ih1); pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); } + if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) + goto restart; } static void svm_range_deferred_list_work(struct work_struct *work) @@ -1972,35 +1983,41 @@ static void svm_range_deferred_list_work(struct work_struct *work) struct svm_range_list *svms; struct svm_range *prange; struct mm_struct *mm; + struct kfd_process *p; svms = container_of(work, struct svm_range_list, deferred_list_work); pr_debug("enter svms 0x%p\n", svms); + p = container_of(svms, struct kfd_process, svms); + /* Avoid mm is gone when inserting mmu notifier */ + mm = 
get_task_mm(p->lead_thread); + if (!mm) { + pr_debug("svms 0x%p process mm gone\n", svms); + return; + } +retry: + mmap_write_lock(mm); + + /* Checking for the need to drain retry faults must be inside + * mmap write lock to serialize with munmap notifiers. + */ + if (unlikely(atomic_read(&svms->drain_pagefaults))) { + mmap_write_unlock(mm); + svm_range_drain_retry_fault(svms); + goto retry; + } + spin_lock(&svms->deferred_list_lock); while (!list_empty(&svms->deferred_range_list)) { prange = list_first_entry(&svms->deferred_range_list, struct svm_range, deferred_list); + list_del_init(&prange->deferred_list); spin_unlock(&svms->deferred_list_lock); + pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, prange->start, prange->last, prange->work_item.op); - /* Make sure no stale retry fault coming after range is freed */ - if (prange->work_item.op == SVM_OP_UNMAP_RANGE) - svm_range_drain_retry_fault(prange->svms); - - mm = prange->work_item.mm; - mmap_write_lock(mm); mutex_lock(&svms->lock); - - /* Remove from deferred_list must be inside mmap write lock, - * otherwise, svm_range_list_lock_and_flush_work may hold mmap - * write lock, and continue because deferred_list is empty, then - * deferred_list handle is blocked by mmap write lock. - */ - spin_lock(&svms->deferred_list_lock); - list_del_init(&prange->deferred_list); - spin_unlock(&svms->deferred_list_lock); - mutex_lock(&prange->migrate_mutex); while (!list_empty(&prange->child_list)) { struct svm_range *pchild; @@ -2016,12 +2033,13 @@ static void svm_range_deferred_list_work(struct work_struct *work) svm_range_handle_list_op(svms, prange); mutex_unlock(&svms->lock); - mmap_write_unlock(mm); spin_lock(&svms->deferred_list_lock); } spin_unlock(&svms->deferred_list_lock); + mmap_write_unlock(mm); + mmput(mm); pr_debug("exit svms 0x%p\n", svms); } @@ -2108,6 +2126,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, prange, prange->start, prange->last, start, last); + /* Make sure pending page faults are drained in the deferred worker + * before the range is freed to avoid straggler interrupts on + * unmapped memory causing "phantom faults". + */ + atomic_inc(&svms->drain_pagefaults); + unmap_parent = start <= prange->start && last >= prange->last; list_for_each_entry(pchild, &prange->child_list, child_list) { @@ -2245,7 +2269,7 @@ svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, * migration if actual loc is not best location, then update GPU page table * mapping to the best location. * - * If vm fault gpu is range preferred loc, the best_loc is preferred loc. + * If the preferred loc is accessible by faulting GPU, use preferred loc. 
* If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then * if range actual loc is cpu, best_loc is cpu @@ -2262,21 +2286,29 @@ svm_range_best_restore_location(struct svm_range *prange, struct amdgpu_device *adev, int32_t *gpuidx) { - struct amdgpu_device *bo_adev; + struct amdgpu_device *bo_adev, *preferred_adev; struct kfd_process *p; uint32_t gpuid; int r; p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx); if (r < 0) { pr_debug("failed to get gpuid from kgd\n"); return -1; } - if (prange->preferred_loc == gpuid) + if (prange->preferred_loc == gpuid || + prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { return prange->preferred_loc; + } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { + preferred_adev = svm_range_get_adev_by_id(prange, + prange->preferred_loc); + if (amdgpu_xgmi_same_hive(adev, preferred_adev)) + return prange->preferred_loc; + /* fall through */ + } if (test_bit(*gpuidx, prange->bitmap_access)) return gpuid; @@ -2294,9 +2326,11 @@ svm_range_best_restore_location(struct svm_range *prange, return -1; } + static int svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, - unsigned long *start, unsigned long *last) + unsigned long *start, unsigned long *last, + bool *is_heap_stack) { struct vm_area_struct *vma; struct interval_tree_node *node; @@ -2307,6 +2341,12 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, pr_debug("VMA does not exist in address [0x%llx]\n", addr); return -EFAULT; } + + *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk && + vma->vm_end >= vma->vm_mm->start_brk) || + (vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack); + start_limit = max(vma->vm_start >> PAGE_SHIFT, (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); end_limit = min(vma->vm_end >> PAGE_SHIFT, @@ -2336,13 +2376,64 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, *start = start_limit; *last = end_limit - 1; - pr_debug("vma start: 0x%lx start: 0x%lx vma end: 0x%lx last: 0x%lx\n", - vma->vm_start >> PAGE_SHIFT, *start, - vma->vm_end >> PAGE_SHIFT, *last); + pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n", + vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT, + *start, *last, *is_heap_stack); return 0; +} +static int +svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, + uint64_t *bo_s, uint64_t *bo_l) +{ + struct amdgpu_bo_va_mapping *mapping; + struct interval_tree_node *node; + struct amdgpu_bo *bo = NULL; + unsigned long userptr; + uint32_t i; + int r; + + for (i = 0; i < p->n_pdds; i++) { + struct amdgpu_vm *vm; + + if (!p->pdds[i]->drm_priv) + continue; + + vm = drm_priv_to_vm(p->pdds[i]->drm_priv); + r = amdgpu_bo_reserve(vm->root.bo, false); + if (r) + return r; + + /* Check userptr by searching entire vm->va interval tree */ + node = interval_tree_iter_first(&vm->va, 0, ~0ULL); + while (node) { + mapping = container_of((struct rb_node *)node, + struct amdgpu_bo_va_mapping, rb); + bo = mapping->bo_va->base.bo; + + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, + start << PAGE_SHIFT, + last << PAGE_SHIFT, + &userptr)) { + node = interval_tree_iter_next(node, 0, ~0ULL); + continue; + } + + pr_debug("[0x%llx 0x%llx] already userptr mapped\n", + start, last); + if (bo_s && bo_l) { + *bo_s = userptr >> PAGE_SHIFT; 
+ *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1; + } + amdgpu_bo_unreserve(vm->root.bo); + return -EADDRINUSE; + } + amdgpu_bo_unreserve(vm->root.bo); + } + return 0; } + static struct svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, struct kfd_process *p, @@ -2352,21 +2443,42 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, struct svm_range *prange = NULL; unsigned long start, last; uint32_t gpuid, gpuidx; + bool is_heap_stack; + uint64_t bo_s = 0; + uint64_t bo_l = 0; + int r; - if (svm_range_get_range_boundaries(p, addr, &start, &last)) + if (svm_range_get_range_boundaries(p, addr, &start, &last, + &is_heap_stack)) return NULL; + r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); + if (r != -EADDRINUSE) + r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l); + + if (r == -EADDRINUSE) { + if (addr >= bo_s && addr <= bo_l) + return NULL; + + /* Create one page svm range if 2MB range overlapping */ + start = addr; + last = addr; + } + prange = svm_range_new(&p->svms, start, last); if (!prange) { pr_debug("Failed to create prange in address [0x%llx]\n", addr); return NULL; } - if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) { + if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) { pr_debug("failed to get gpuid from kgd\n"); svm_range_free(prange); return NULL; } + if (is_heap_stack) + prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM; + svm_range_add_to_svms(prange); svm_range_add_notifier_locked(mm, prange); @@ -2425,7 +2537,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, uint32_t gpuid; int r; - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx); if (r < 0) return; } @@ -2439,20 +2551,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, } static bool -svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault) +svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) { unsigned long requested = VM_READ; - struct vm_area_struct *vma; if (write_fault) requested |= VM_WRITE; - vma = find_vma(mm, addr << PAGE_SHIFT); - if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { - pr_debug("address 0x%llx VMA is removed\n", addr); - return true; - } - pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, vma->vm_flags); return (vma->vm_flags & requested) == requested; @@ -2470,6 +2575,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; + struct vm_area_struct *vma; int r = 0; if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { @@ -2480,7 +2586,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, p = kfd_lookup_process_by_pasid(pasid); if (!p) { pr_debug("kfd process not founded pasid 0x%x\n", pasid); - return -ESRCH; + return 0; } if (!p->xnack_enabled) { pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); @@ -2491,10 +2597,19 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); + if (atomic_read(&svms->drain_pagefaults)) { + pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + r = 0; + goto out; + } + + /* p->lead_thread is available as kfd_process_wq_release flush the work + * before releasing task ref. 
+ */ mm = get_task_mm(p->lead_thread); if (!mm) { pr_debug("svms 0x%p failed to get mm\n", svms); - r = -ESRCH; + r = 0; goto out; } @@ -2532,6 +2647,7 @@ retry_write_locked: if (svm_range_skip_recover(prange)) { amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + r = 0; goto out_unlock_range; } @@ -2540,10 +2656,21 @@ retry_write_locked: if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) { pr_debug("svms 0x%p [0x%lx %lx] already restored\n", svms, prange->start, prange->last); + r = 0; goto out_unlock_range; } - if (!svm_fault_allowed(mm, addr, write_fault)) { + /* __do_munmap removed VMA, return success as we are handling stale + * retry fault. + */ + vma = find_vma(mm, addr << PAGE_SHIFT); + if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { + pr_debug("address 0x%llx VMA is removed\n", addr); + r = 0; + goto out_unlock_range; + } + + if (!svm_fault_allowed(vma, write_fault)) { pr_debug("fault addr 0x%llx no %s permission\n", addr, write_fault ? "write" : "read"); r = -EPERM; @@ -2621,6 +2748,14 @@ void svm_range_list_fini(struct kfd_process *p) /* Ensure list work is finished before process is destroyed */ flush_work(&p->svms.deferred_list_work); + /* + * Ensure no retry fault comes in afterwards, as page fault handler will + * not find kfd process and take mm lock to recover fault. + */ + atomic_inc(&p->svms.drain_pagefaults); + svm_range_drain_retry_fault(&p->svms); + + list_for_each_entry_safe(prange, next, &p->svms.list, list) { svm_range_unlink(prange); svm_range_remove_notifier(prange); @@ -2641,6 +2776,7 @@ int svm_range_list_init(struct kfd_process *p) mutex_init(&svms->lock); INIT_LIST_HEAD(&svms->list); atomic_set(&svms->evicted_ranges, 0); + atomic_set(&svms->drain_pagefaults, 0); INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); INIT_LIST_HEAD(&svms->deferred_range_list); @@ -2654,8 +2790,67 @@ int svm_range_list_init(struct kfd_process *p) } /** + * svm_range_check_vm - check if virtual address range mapped already + * @p: current kfd_process + * @start: range start address, in pages + * @last: range last address, in pages + * @bo_s: mapping start address in pages if address range already mapped + * @bo_l: mapping last address in pages if address range already mapped + * + * The purpose is to avoid virtual address ranges already allocated by + * kfd_ioctl_alloc_memory_of_gpu ioctl. + * It looks for each pdd in the kfd_process. + * + * Context: Process context + * + * Return 0 - OK, if the range is not mapped. + * Otherwise error code: + * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu + * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. 
+ */ +static int +svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, + uint64_t *bo_s, uint64_t *bo_l) +{ + struct amdgpu_bo_va_mapping *mapping; + struct interval_tree_node *node; + uint32_t i; + int r; + + for (i = 0; i < p->n_pdds; i++) { + struct amdgpu_vm *vm; + + if (!p->pdds[i]->drm_priv) + continue; + + vm = drm_priv_to_vm(p->pdds[i]->drm_priv); + r = amdgpu_bo_reserve(vm->root.bo, false); + if (r) + return r; + + node = interval_tree_iter_first(&vm->va, start, last); + if (node) { + pr_debug("range [0x%llx 0x%llx] already TTM mapped\n", + start, last); + mapping = container_of((struct rb_node *)node, + struct amdgpu_bo_va_mapping, rb); + if (bo_s && bo_l) { + *bo_s = mapping->start; + *bo_l = mapping->last; + } + amdgpu_bo_unreserve(vm->root.bo); + return -EADDRINUSE; + } + amdgpu_bo_unreserve(vm->root.bo); + } + + return 0; +} + +/** * svm_range_is_valid - check if virtual address range is valid - * @mm: current process mm_struct + * @p: current kfd_process * @start: range start address, in pages * @size: range size, in pages * @@ -2664,28 +2859,28 @@ int svm_range_list_init(struct kfd_process *p) * Context: Process context * * Return: - * true - valid svm range - * false - invalid svm range + * 0 - OK, otherwise error code */ -static bool -svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size) +static int +svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size) { const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; struct vm_area_struct *vma; unsigned long end; + unsigned long start_unchg = start; start <<= PAGE_SHIFT; end = start + (size << PAGE_SHIFT); - do { - vma = find_vma(mm, start); + vma = find_vma(p->mm, start); if (!vma || start < vma->vm_start || (vma->vm_flags & device_vma)) - return false; + return -EFAULT; start = min(end, vma->vm_end); } while (start < end); - return true; + return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL, + NULL); } /** @@ -2774,7 +2969,6 @@ svm_range_best_prefetch_location(struct svm_range *prange) uint32_t best_loc = prange->prefetch_loc; struct kfd_process_device *pdd; struct amdgpu_device *bo_adev; - struct amdgpu_device *adev; struct kfd_process *p; uint32_t gpuidx; @@ -2802,12 +2996,11 @@ svm_range_best_prefetch_location(struct svm_range *prange) pr_debug("failed to get device by idx 0x%x\n", gpuidx); continue; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - if (adev == bo_adev) + if (pdd->dev->adev == bo_adev) continue; - if (!amdgpu_xgmi_same_hive(adev, bo_adev)) { + if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { best_loc = 0; break; } @@ -2933,6 +3126,8 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) struct svm_range *prange = list_first_entry(&svm_bo->range_list, struct svm_range, svm_bo_list); + int retries = 3; + list_del_init(&prange->svm_bo_list); spin_unlock(&svm_bo->list_lock); @@ -2940,7 +3135,11 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work) prange->start, prange->last); mutex_lock(&prange->migrate_mutex); - svm_migrate_vram_to_ram(prange, svm_bo->eviction_fence->mm); + do { + svm_migrate_vram_to_ram(prange, + svm_bo->eviction_fence->mm); + } while (prange->actual_loc && --retries); + WARN(prange->actual_loc, "Migration failed during eviction"); mutex_lock(&prange->lock); prange->svm_bo = NULL; @@ -2965,7 +3164,6 @@ static int svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) { - struct 
amdkfd_process_info *process_info = p->kgd_process_info; struct mm_struct *mm = current->mm; struct list_head update_list; struct list_head insert_list; @@ -2984,13 +3182,11 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svms = &p->svms; - mutex_lock(&process_info->lock); - svm_range_list_lock_and_flush_work(svms, mm); - if (!svm_range_is_valid(mm, start, size)) { - pr_debug("invalid range\n"); - r = -EFAULT; + r = svm_range_is_valid(p, start, size); + if (r) { + pr_debug("invalid range r=%d\n", r); mmap_write_unlock(mm); goto out; } @@ -3061,8 +3257,6 @@ out_unlock_range: mutex_unlock(&svms->lock); mmap_read_unlock(mm); out: - mutex_unlock(&process_info->lock); - pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, &p->svms, start, start + size - 1, r); @@ -3092,6 +3286,7 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size, uint32_t flags_or = 0; int gpuidx; uint32_t i; + int r = 0; pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, start + size - 1, nattr); @@ -3105,12 +3300,12 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size, flush_work(&p->svms.deferred_list_work); mmap_read_lock(mm); - if (!svm_range_is_valid(mm, start, size)) { - pr_debug("invalid range\n"); - mmap_read_unlock(mm); - return -EINVAL; - } + r = svm_range_is_valid(p, start, size); mmap_read_unlock(mm); + if (r) { + pr_debug("invalid range r=%d\n", r); + return r; + } for (i = 0; i < nattr; i++) { switch (attrs[i].type) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index c6ec55354c7b..6dc91c33e80f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -188,6 +188,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm, void *owner); struct kfd_process_device * svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev); +void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm); /* SVM API and HMM page migration work together, device memory type * is initialized to not 0 when page migration register device memory. 
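For reference: a recurring pattern in this series, visible again in the kfd_topology.c hunks that follow, is replacing per-chip switch (asic_family) tables with ordered comparisons on the GC IP version through KFD_GC_VERSION(). A sketch of the idea, assuming the usual amdgpu IP_VERSION encoding and a hypothetical helper that mirrors the new default branch of pm_init():

/* IP_VERSION packs (major, minor, revision) so that plain integer
 * comparison orders GPU generations:
 *   IP_VERSION(9, 4, 1)  - Arcturus
 *   IP_VERSION(9, 4, 2)  - Aldebaran
 *   IP_VERSION(10, 1, 1) - early GFX10 (Navi10 class)
 */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static const struct packet_manager_funcs *pick_pm_funcs(struct kfd_dev *dev)
{
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
		return &kfd_aldebaran_pm_funcs;	/* Aldebaran-specific PM4 */
	if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
		return &kfd_v9_pm_funcs;	/* all other SOC15 and newer parts */
	return NULL;	/* pre-SOC15 chips keep their explicit CHIP_* cases */
}

One comparison per generation boundary replaces one CHIP_* case per product, so a new ASIC that reuses an existing GC version needs no KFD change at all.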
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 98cca5f2b27f..2d44b26b6657 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -113,7 +113,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd) +struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev) { struct kfd_topology_device *top_dev; struct kfd_dev *device = NULL; @@ -121,7 +121,7 @@ struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd) down_read(&topology_lock); list_for_each_entry(top_dev, &topology_device_list, list) - if (top_dev->gpu && top_dev->gpu->kgd == kgd) { + if (top_dev->gpu && top_dev->gpu->adev == adev) { device = top_dev->gpu; break; } @@ -515,7 +515,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, HSA_CAP_WATCH_POINTS_TOTALBITS_MASK); } - if (dev->gpu->device_info->asic_family == CHIP_TONGA) + if (dev->gpu->adev->asic_type == CHIP_TONGA) dev->node_props.capability |= HSA_CAP_AQL_QUEUE_DOUBLE_MAP; @@ -531,7 +531,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", - amdgpu_amdkfd_get_unique_id(dev->gpu->kgd)); + dev->gpu->adev->unique_id); } @@ -1106,7 +1106,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) if (!gpu) return 0; - amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(gpu->adev, &local_mem_info); local_mem_size = local_mem_info.local_mem_size_private + local_mem_info.local_mem_size_public; @@ -1189,7 +1189,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * for APUs - If CRAT from ACPI reports more than one bank, then * all the banks will report the same mem_clk_max information */ - amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; @@ -1217,8 +1217,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev, /* set gpu (dev) flags. */ } else { if (!dev->gpu->pci_atomic_requested || - dev->gpu->device_info->asic_family == - CHIP_HAWAII) + dev->gpu->adev->asic_type == CHIP_HAWAII) link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; } @@ -1239,7 +1238,7 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev, */ if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS || (inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI && - to_dev->gpu->device_info->asic_family == CHIP_VEGA20)) { + KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) { outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT; inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT; } @@ -1286,7 +1285,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu) void *crat_image = NULL; size_t image_size = 0; int proximity_domain; - struct amdgpu_device *adev; INIT_LIST_HEAD(&temp_topology_device_list); @@ -1296,6 +1294,22 @@ int kfd_topology_add_device(struct kfd_dev *gpu) proximity_domain = atomic_inc_return(&topology_crat_proximity_domain); + /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. 
*/ + if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) { + struct kfd_topology_device *top_dev; + + down_read(&topology_lock); + + list_for_each_entry(top_dev, &topology_device_list, list) { + if (top_dev->gpu) + break; + + top_dev->node_props.hive_id = gpu->hive_id; + } + + up_read(&topology_lock); + } + /* Check to see if this gpu device exists in the topology_device_list. * If so, assign the gpu to that device, * else create a Virtual CRAT for this gpu device and then parse that @@ -1354,7 +1368,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * needed for the topology */ - amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info); strncpy(dev->node_props.name, gpu->device_info->asic_name, KFD_TOPOLOGY_PUBLIC_NAME_SIZE); @@ -1366,33 +1380,32 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.vendor_id = gpu->pdev->vendor; dev->node_props.device_id = gpu->pdev->device; dev->node_props.capability |= - ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) << - HSA_CAP_ASIC_REVISION_SHIFT) & + ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) & HSA_CAP_ASIC_REVISION_MASK); dev->node_props.location_id = pci_dev_id(gpu->pdev); dev->node_props.domain = pci_domain_nr(gpu->pdev->bus); dev->node_props.max_engine_clk_fcompute = - amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd); + amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; dev->node_props.drm_render_minor = gpu->shared_resources.drm_render_minor; dev->node_props.hive_id = gpu->hive_id; - dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines; + dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); dev->node_props.num_sdma_xgmi_engines = - gpu->device_info->num_xgmi_sdma_engines; + kfd_get_num_xgmi_sdma_engines(gpu); dev->node_props.num_sdma_queues_per_engine = gpu->device_info->num_sdma_queues_per_engine; dev->node_props.num_gws = (dev->gpu->gws && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? 
- amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; + dev->gpu->adev->gds.gws_size : 0; dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); kfd_fill_mem_clk_max_info(dev); kfd_fill_iolink_non_crat_info(dev); - switch (dev->gpu->device_info->asic_family) { + switch (dev->gpu->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_TONGA: @@ -1411,30 +1424,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu) HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << - HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & - HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->gpu->device_info->asic_family); + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1)) + dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << + HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & + HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); + else + WARN(1, "Unexpected ASIC family %u", + dev->gpu->adev->asic_type); } /* @@ -1451,25 +1448,25 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * because it doesn't consider masked out CUs * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd */ - if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) { + if (dev->gpu->adev->asic_type == CHIP_CARRIZO) { dev->node_props.simd_count = cu_info.simd_per_cu * cu_info.cu_active_number; dev->node_props.max_waves_per_simd = 10; } - adev = (struct amdgpu_device *)(dev->gpu->kgd); /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */ dev->node_props.capability |= - ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? + ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? HSA_CAP_SRAM_EDCSUPPORTED : 0; - dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? + dev->node_props.capability |= + ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? HSA_CAP_MEM_EDCSUPPORTED : 0; - if (adev->asic_type != CHIP_VEGA10) - dev->node_props.capability |= (adev->ras_enabled != 0) ? + if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1)) + dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? 
HSA_CAP_RASEVENTNOTIFY : 0; - if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) + if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev)) dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; kfd_debug_print_topology(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index a8db017c9b8e..f0cc59d2fd5d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -25,38 +25,11 @@ #include <linux/types.h> #include <linux/list.h> +#include <linux/kfd_sysfs.h> #include "kfd_crat.h" #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32 -#define HSA_CAP_HOT_PLUGGABLE 0x00000001 -#define HSA_CAP_ATS_PRESENT 0x00000002 -#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004 -#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008 -#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010 -#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020 -#define HSA_CAP_VA_LIMIT 0x00000040 -#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080 -#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00 -#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8 -#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000 -#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12 - -#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0 -#define HSA_CAP_DOORBELL_TYPE_1_0 0x1 -#define HSA_CAP_DOORBELL_TYPE_2_0 0x2 -#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 - -#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 /* Old buggy user mode depends on this being 0 */ -#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000 -#define HSA_CAP_RASEVENTNOTIFY 0x00200000 -#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000 -#define HSA_CAP_ASIC_REVISION_SHIFT 22 -#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000 -#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000 -#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000 -#define HSA_CAP_RESERVED 0xe00f8000 - struct kfd_node_properties { uint64_t hive_id; uint32_t cpu_cores_count; @@ -93,17 +66,6 @@ struct kfd_node_properties { char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; }; -#define HSA_MEM_HEAP_TYPE_SYSTEM 0 -#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1 -#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2 -#define HSA_MEM_HEAP_TYPE_GPU_GDS 3 -#define HSA_MEM_HEAP_TYPE_GPU_LDS 4 -#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5 - -#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001 -#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002 -#define HSA_MEM_FLAGS_RESERVED 0xfffffffc - struct kfd_mem_properties { struct list_head list; uint32_t heap_type; @@ -116,12 +78,6 @@ struct kfd_mem_properties { struct attribute attr; }; -#define HSA_CACHE_TYPE_DATA 0x00000001 -#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002 -#define HSA_CACHE_TYPE_CPU 0x00000004 -#define HSA_CACHE_TYPE_HSACU 0x00000008 -#define HSA_CACHE_TYPE_RESERVED 0xfffffff0 - struct kfd_cache_properties { struct list_head list; uint32_t processor_id_low; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 7dffc04a557e..127667e549c1 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -25,6 +25,8 @@ config DRM_AMD_DC_HDCP config DRM_AMD_DC_SI bool "AMD DC support for Southern Islands ASICs" + depends on DRM_AMDGPU_SI + depends on DRM_AMD_DC default n help Choose this option to enable new AMD DC support for SI asics diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9b1fc54555ee..116a280d8a20 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -29,6 +29,7 @@ #include 
"dm_services_types.h" #include "dc.h" #include "dc_link_dp.h" +#include "link_enc_cfg.h" #include "dc/inc/core_types.h" #include "dal_asic_id.h" #include "dmub/dmub_srv.h" @@ -50,6 +51,7 @@ #include <drm/drm_hdcp.h> #endif #include "amdgpu_pm.h" +#include "amdgpu_atombios.h" #include "amd_shared.h" #include "amdgpu_dm_irq.h" @@ -215,6 +217,9 @@ static void handle_cursor_update(struct drm_plane *plane, static const struct drm_format_info * amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); +static void handle_hpd_rx_irq(void *param); + static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state); @@ -616,7 +621,122 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); } -#endif +#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ + +/** + * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * Dmub AUX or SET_CONFIG command completion processing callback + * Copies dmub notification to DM which is to be read by AUX command. + * issuing thread and also signals the event to wake up the thread. + */ +void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +{ + if (adev->dm.dmub_notify) + memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); + if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) + complete(&adev->dm.dmub_aux_transfer_done); +} + +/** + * dmub_hpd_callback - DMUB HPD interrupt processing callback. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * Dmub Hpd interrupt processing callback. Gets displayindex through the + * ink index and calls helper to do the processing. + */ +void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) +{ + struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *hpd_aconnector = NULL; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct dc_link *link; + uint8_t link_index = 0; + struct drm_device *dev = adev->dm.ddev; + + if (adev == NULL) + return; + + if (notify == NULL) { + DRM_ERROR("DMUB HPD callback notification was NULL"); + return; + } + + if (notify->link_index > adev->dm.dc->link_count) { + DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); + return; + } + + link_index = notify->link_index; + link = adev->dm.dc->links[link_index]; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + aconnector = to_amdgpu_dm_connector(connector); + if (link && aconnector->dc_link == link) { + DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); + hpd_aconnector = aconnector; + break; + } + } + drm_connector_list_iter_end(&iter); + + if (hpd_aconnector) { + if (notify->type == DMUB_NOTIFICATION_HPD) + handle_hpd_irq_helper(hpd_aconnector); + else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + handle_hpd_rx_irq(hpd_aconnector); + } +} + +/** + * register_dmub_notify_callback - Sets callback for DMUB notify + * @adev: amdgpu_device pointer + * @type: Type of dmub notification + * @callback: Dmub interrupt callback function + * @dmub_int_thread_offload: offload indicator + * + * API to register a dmub callback handler for a dmub notification + * Also sets indicator whether callback processing to be offloaded. 
+ * to dmub interrupt handling thread + * Return: true if successfully registered, false if there is existing registration + */ +bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, +dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) +{ + if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { + adev->dm.dmub_callback[type] = callback; + adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; + } else + return false; + + return true; +} + +static void dm_handle_hpd_work(struct work_struct *work) +{ + struct dmub_hpd_work *dmub_hpd_wrk; + + dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); + + if (!dmub_hpd_wrk->dmub_notify) { + DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); + return; + } + + if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { + dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, + dmub_hpd_wrk->dmub_notify); + } + + kfree(dmub_hpd_wrk->dmub_notify); + kfree(dmub_hpd_wrk); + +} #define DMUB_TRACE_MAX_READ 64 /** @@ -634,22 +754,50 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) struct amdgpu_display_manager *dm = &adev->dm; struct dmcub_trace_buf_entry entry = { 0 }; uint32_t count = 0; + struct dmub_hpd_work *dmub_hpd_wrk; + struct dc_link *plink = NULL; - if (dc_enable_dmub_notifications(adev->dm.dc)) { - if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { - do { - dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); - } while (notify.pending_notification); - - if (adev->dm.dmub_notify) - memcpy(adev->dm.dmub_notify, ¬ify, sizeof(struct dmub_notification)); - if (notify.type == DMUB_NOTIFICATION_AUX_REPLY) - complete(&adev->dm.dmub_aux_transfer_done); - // TODO : HPD Implementation + if (dc_enable_dmub_notifications(adev->dm.dc) && + irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { - } else { - DRM_ERROR("DM: Failed to receive correct outbox IRQ !"); - } + do { + dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); + if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) { + DRM_ERROR("DM: notify type %d invalid!", notify.type); + continue; + } + if (!dm->dmub_callback[notify.type]) { + DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); + continue; + } + if (dm->dmub_thread_offload[notify.type] == true) { + dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); + if (!dmub_hpd_wrk) { + DRM_ERROR("Failed to allocate dmub_hpd_wrk"); + return; + } + dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC); + if (!dmub_hpd_wrk->dmub_notify) { + kfree(dmub_hpd_wrk); + DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); + return; + } + INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); + if (dmub_hpd_wrk->dmub_notify) + memcpy(dmub_hpd_wrk->dmub_notify, ¬ify, sizeof(struct dmub_notification)); + dmub_hpd_wrk->adev = adev; + if (notify.type == DMUB_NOTIFICATION_HPD) { + plink = adev->dm.dc->links[notify.link_index]; + if (plink) { + plink->hpd_status = + notify.hpd_status == DP_HPD_PLUG; + } + } + queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); + } else { + dm->dmub_callback[notify.type](adev, ¬ify); + } + } while (notify.pending_notification); } @@ -667,9 +815,10 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) } while (count <= DMUB_TRACE_MAX_READ); - ASSERT(count <= DMUB_TRACE_MAX_READ); + if (count > DMUB_TRACE_MAX_READ) + DRM_DEBUG_DRIVER("Warning : count 
> DMUB_TRACE_MAX_READ"); } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) @@ -873,6 +1022,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) const unsigned char *fw_inst_const, *fw_bss_data; uint32_t i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; + struct dc *dc = adev->dm.dc; if (!dmub_srv) /* DMUB isn't supported on the ASIC. */ @@ -959,6 +1109,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; + switch (adev->asic_type) { + case CHIP_YELLOW_CARP: + if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) { + hw_params.dpia_supported = true; +#if defined(CONFIG_DRM_AMD_DC_DCN) + hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; +#endif + } + break; + default: + break; + } + status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error initializing DMUB HW: %d\n", status); @@ -998,6 +1161,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ uint32_t agp_base, agp_bot, agp_top; PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; + memset(pa_config, 0, sizeof(*pa_config)); + logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); @@ -1081,6 +1246,114 @@ static void vblank_control_worker(struct work_struct *work) } #endif + +static void dm_handle_hpd_rx_offload_work(struct work_struct *work) +{ + struct hpd_rx_irq_offload_work *offload_work; + struct amdgpu_dm_connector *aconnector; + struct dc_link *dc_link; + struct amdgpu_device *adev; + enum dc_connection_type new_connection_type = dc_connection_none; + unsigned long flags; + + offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); + aconnector = offload_work->offload_wq->aconnector; + + if (!aconnector) { + DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); + goto skip; + } + + adev = drm_to_adev(aconnector->base.dev); + dc_link = aconnector->dc_link; + + mutex_lock(&aconnector->hpd_lock); + if (!dc_link_detect_sink(dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + mutex_unlock(&aconnector->hpd_lock); + + if (new_connection_type == dc_connection_none) + goto skip; + + if (amdgpu_in_reset(adev)) + goto skip; + + mutex_lock(&adev->dm.dc_lock); + if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) + dc_link_dp_handle_automated_test(dc_link); + else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && + hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && + dc_link_dp_allow_hpd_rx_irq(dc_link)) { + dc_link_dp_handle_link_loss(dc_link); + spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); + offload_work->offload_wq->is_handling_link_loss = false; + spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + } + mutex_unlock(&adev->dm.dc_lock); + +skip: + kfree(offload_work); + +} + +static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) +{ + int max_caps = dc->caps.max_links; + int i = 0; + struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; + + hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); + + if (!hpd_rx_offload_wq) + return NULL; + + + for (i = 0; i < max_caps; i++) { + hpd_rx_offload_wq[i].wq = + create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); + + if (hpd_rx_offload_wq[i].wq 
== NULL) { + DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); + return NULL; + } + + spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); + } + + return hpd_rx_offload_wq; +} + +struct amdgpu_stutter_quirk { + u16 chip_vendor; + u16 chip_device; + u16 subsys_vendor; + u16 subsys_device; + u8 revision; +}; + +static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + { 0, 0, 0, 0, 0 }, +}; + +static bool dm_should_disable_stutter(struct pci_dev *pdev) +{ + const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; + + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device && + pdev->revision == p->revision) { + return true; + } + ++p; + } + return false; +} + static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; @@ -1113,6 +1386,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.asic_id.pci_revision_id = adev->pdev->revision; init_data.asic_id.hw_internal_rev = adev->external_rev_id; + init_data.asic_id.chip_id = adev->pdev->device; init_data.asic_id.vram_width = adev->gmc.vram_width; /* TODO: initialize init_data.asic_id.vram_type here!!!! */ @@ -1135,17 +1409,35 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_CARRIZO: case CHIP_STONEY: - case CHIP_RAVEN: - case CHIP_RENOIR: - init_data.flags.gpu_vm_support = true; - if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) - init_data.flags.disable_dmcu = true; - break; - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: init_data.flags.gpu_vm_support = true; break; default: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 1, 0): + init_data.flags.gpu_vm_support = true; + switch (adev->dm.dmcub_fw_version) { + case 0: /* development */ + case 0x1: /* linux-firmware.git hash 6d9f399 */ + case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ + init_data.flags.disable_dmcu = false; + break; + default: + init_data.flags.disable_dmcu = true; + } + break; + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + init_data.flags.gpu_vm_support = true; + break; + case IP_VERSION(2, 0, 3): + init_data.flags.disable_dmcu = true; + break; + default: + break; + } break; } @@ -1163,6 +1455,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.power_down_display_on_boot = true; + if (check_seamless_boot_capability(adev)) { + init_data.flags.power_down_display_on_boot = false; + init_data.flags.allow_seamless_boot_optimization = true; + DRM_INFO("Seamless boot condition check passed\n"); + } + INIT_LIST_HEAD(&adev->dm.da_list); /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -1181,12 +1479,16 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? 
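The dm_should_disable_stutter() hunk above is a sentinel-terminated PCI quirk table: the walk stops at the all-zero entry, and a device must match all five IDs (vendor, device, subsystem vendor, subsystem device, revision) for the quirk to apply. Below is a minimal standalone sketch of the same pattern; it is plain C, not driver code, and struct pci_ids is a made-up stand-in for the relevant struct pci_dev fields.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pci_ids {
    uint16_t vendor, device, subsystem_vendor, subsystem_device;
    uint8_t revision;
};

struct stutter_quirk {
    uint16_t chip_vendor, chip_device, subsys_vendor, subsys_device;
    uint8_t revision;
};

static const struct stutter_quirk quirk_list[] = {
    /* the Raven 0x15dd entry from the hunk above */
    { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
    { 0, 0, 0, 0, 0 },  /* all-zero sentinel terminates the walk */
};

static bool should_disable_stutter(const struct pci_ids *pdev)
{
    const struct stutter_quirk *p = quirk_list;

    while (p->chip_device != 0) {
        if (pdev->vendor == p->chip_vendor &&
            pdev->device == p->chip_device &&
            pdev->subsystem_vendor == p->subsys_vendor &&
            pdev->subsystem_device == p->subsys_device &&
            pdev->revision == p->revision)
            return true;
        ++p;
    }
    return false;
}

int main(void)
{
    struct pci_ids raven = { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 };

    printf("disable stutter: %s\n",
           should_disable_stutter(&raven) ? "yes" : "no");
    return 0;
}

Terminating on chip_device == 0 rather than an ARRAY_SIZE() bound keeps the table trivially appendable; the trade-off is that an all-zero entry can never be a real quirk.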
false : true; + if (dm_should_disable_stutter(adev->pdev)) + adev->dm.dc->debug.disable_stutter = true; if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { adev->dm.dc->debug.disable_dsc = true; + adev->dm.dc->debug.disable_dsc_edp = true; + } if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) adev->dm.dc->debug.disable_clock_gate = true; @@ -1199,6 +1501,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); + adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); + if (!adev->dm.hpd_rx_offload_wq) { + DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); + goto error; + } + #if defined(CONFIG_DRM_AMD_DC_DCN) if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { struct dc_phy_addr_space_config pa_config; @@ -1230,7 +1538,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) #endif #ifdef CONFIG_DRM_AMD_DC_HDCP - if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) { + if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); if (!adev->dm.hdcp_workqueue) @@ -1251,7 +1559,29 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); goto error; } + + adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); + if (!adev->dm.delayed_hpd_wq) { + DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); + goto error; + } + amdgpu_dm_outbox_init(adev); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, + dmub_aux_setconfig_callback, false)) { + DRM_ERROR("amdgpu: fail to register dmub aux callback"); + goto error; + } + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } +#endif /* CONFIG_DRM_AMD_DC_DCN */ } if (amdgpu_dm_initialize_drm_device(adev)) { @@ -1333,6 +1663,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) if (dc_enable_dmub_notifications(adev->dm.dc)) { kfree(adev->dm.dmub_notify); adev->dm.dmub_notify = NULL; + destroy_workqueue(adev->dm.delayed_hpd_wq); + adev->dm.delayed_hpd_wq = NULL; } if (adev->dm.dmub_bo) @@ -1340,6 +1672,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) &adev->dm.dmub_bo_gpu_addr, &adev->dm.dmub_bo_cpu_addr); + if (adev->dm.hpd_rx_offload_wq) { + for (i = 0; i < adev->dm.dc->caps.max_links; i++) { + if (adev->dm.hpd_rx_offload_wq[i].wq) { + destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); + adev->dm.hpd_rx_offload_wq[i].wq = NULL; + } + } + + kfree(adev->dm.hpd_rx_offload_wq); + adev->dm.hpd_rx_offload_wq = NULL; + } + /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); @@ -1393,15 +1737,6 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_RENOIR: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: return 0; case CHIP_NAVI12: 
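From this point the series systematically replaces asic_type/CHIP_* checks with discrete-IP versions read from adev->ip_versions[DCE_HWIP][0], as in the load_dmcu_fw() and dm_dmub_sw_init() hunks around here. The standalone sketch below shows why that works for both switch cases and ordered comparisons; the major/minor/revision bit packing is an assumption for illustration (the real IP_VERSION() macro lives in the amdgpu headers).

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: major/minor/revision folded into one comparable integer. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static const char *dmub_asic_for_dce(uint32_t dce_ver)
{
    /* Mirrors the dm_dmub_sw_init() switch nearby, minus the
     * Sienna Cichlid vs. Navy Flounder GC_HWIP disambiguation
     * and the Yellow Carp A0/B0 split. */
    switch (dce_ver) {
    case IP_VERSION(2, 1, 0):  return "DMUB_ASIC_DCN21";
    case IP_VERSION(3, 0, 0):  return "DMUB_ASIC_DCN30";
    case IP_VERSION(3, 0, 1):  return "DMUB_ASIC_DCN301";
    case IP_VERSION(3, 0, 2):  return "DMUB_ASIC_DCN302";
    case IP_VERSION(3, 0, 3):  return "DMUB_ASIC_DCN303";
    case IP_VERSION(3, 1, 2):
    case IP_VERSION(3, 1, 3):  return "DMUB_ASIC_DCN31";
    default:                   return "unsupported";
    }
}

int main(void)
{
    printf("DCE 3.0.1 -> %s\n", dmub_asic_for_dce(IP_VERSION(3, 0, 1)));
    /* The packed form also supports ordered comparisons, e.g. the
     * ">= IP_VERSION(10, 3, 0)" GC_HWIP checks elsewhere in this diff. */
    printf("3.1.2 >= 3.0.0: %d\n",
           IP_VERSION(3, 1, 2) >= IP_VERSION(3, 0, 0));
    return 0;
}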
fw_name_dmcu = FIRMWARE_NAVI12_DMCU; @@ -1415,6 +1750,21 @@ static int load_dmcu_fw(struct amdgpu_device *adev) return 0; break; default: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 0, 3): + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + return 0; + default: + break; + } DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); return -EINVAL; } @@ -1493,35 +1843,37 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) enum dmub_status status; int r; - switch (adev->asic_type) { - case CHIP_RENOIR: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 1, 0): dmub_asic = DMUB_ASIC_DCN21; fw_name_dmub = FIRMWARE_RENOIR_DMUB; if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; break; - case CHIP_SIENNA_CICHLID: - dmub_asic = DMUB_ASIC_DCN30; - fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; - break; - case CHIP_NAVY_FLOUNDER: - dmub_asic = DMUB_ASIC_DCN30; - fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; + case IP_VERSION(3, 0, 0): + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { + dmub_asic = DMUB_ASIC_DCN30; + fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; + } else { + dmub_asic = DMUB_ASIC_DCN30; + fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; + } break; - case CHIP_VANGOGH: + case IP_VERSION(3, 0, 1): dmub_asic = DMUB_ASIC_DCN301; fw_name_dmub = FIRMWARE_VANGOGH_DMUB; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(3, 0, 2): dmub_asic = DMUB_ASIC_DCN302; fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(3, 0, 3): dmub_asic = DMUB_ASIC_DCN303; fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; break; - case CHIP_YELLOW_CARP: - dmub_asic = DMUB_ASIC_DCN31; + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? 
DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; break; @@ -1717,6 +2069,7 @@ static int dm_late_init(void *handle) linear_lut[i] = 0xFFFF * i / 15; params.set = 0; + params.backlight_ramping_override = false; params.backlight_ramping_start = 0xCCCC; params.backlight_ramping_reduction = 0xCCCCCCCC; params.backlight_lut_array_size = 16; @@ -1819,10 +2172,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) * therefore, this function apply to navi10/12/14 but not Renoir * * */ - switch(adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 0, 0): break; default: return 0; @@ -1959,14 +2311,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) goto fail; } - - res = dc_validate_global_state(dc, context, false); - - if (res != DC_OK) { - DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res); - goto fail; - } - res = dc_commit_state(dc, context); fail: @@ -1976,6 +2320,16 @@ context_alloc_fail: return res; } +static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) +{ + int i; + + if (dm->hpd_rx_offload_wq) { + for (i = 0; i < dm->dc->caps.max_links; i++) + flush_workqueue(dm->hpd_rx_offload_wq[i].wq); + } +} + static int dm_suspend(void *handle) { struct amdgpu_device *adev = handle; @@ -1997,6 +2351,8 @@ static int dm_suspend(void *handle) amdgpu_dm_irq_suspend(adev); + hpd_rx_irq_work_suspend(dm); + return ret; } @@ -2007,6 +2363,8 @@ static int dm_suspend(void *handle) amdgpu_dm_irq_suspend(adev); + hpd_rx_irq_work_suspend(dm); + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); return 0; @@ -2153,7 +2511,7 @@ cleanup: return; } -static void dm_set_dpms_off(struct dc_link *link) +static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state) { struct dc_stream_state *stream_state; struct amdgpu_dm_connector *aconnector = link->priv; @@ -2174,6 +2532,7 @@ static void dm_set_dpms_off(struct dc_link *link) } stream_update.stream = stream_state; + acrtc_state->force_dpms_off = true; dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, stream_state, &stream_update, stream_state->ctx->dc->current_state); @@ -2202,6 +2561,23 @@ static int dm_resume(void *handle) if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; + /* + * The dc->current_state is backed up into dm->cached_dc_state + * before we commit 0 streams. + * + * DC will clear link encoder assignments on the real state + * but the changes won't propagate over to the copy we made + * before the 0 streams commit. + * + * DC expects that link encoder assignments are *not* valid + * when committing a state, so as a workaround it needs to be + * cleared here. 
+ */ + link_enc_cfg_init(dm->dc, dc_state); + + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -2213,20 +2589,11 @@ static int dm_resume(void *handle) for (i = 0; i < dc_state->stream_count; i++) { dc_state->streams[i]->mode_changed = true; - for (j = 0; j < dc_state->stream_status->plane_count; j++) { - dc_state->stream_status->plane_states[j]->update_flags.raw + for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { + dc_state->stream_status[i].plane_states[j]->update_flags.raw = 0xffffffff; } } -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* - * Resource allocation happens for link encoders for newer ASIC in - * dc_validate_global_state, so we need to revalidate it. - * - * This shouldn't fail (it passed once before), so warn if it does. - */ - WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK); -#endif WARN_ON(!dc_commit_state(dm->dc, dc_state)); @@ -2249,6 +2616,10 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); + /* Re-enable outbox interrupts for DPIA. */ + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + /* Before powering on DC we need to re-initialize DMUB. */ r = dm_dmub_hw_init(adev); if (r) @@ -2611,20 +2982,22 @@ void amdgpu_dm_update_connector_after_detect( dc_sink_release(sink); } -static void handle_hpd_irq(void *param) +static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) { - struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); -#ifdef CONFIG_DRM_AMD_DC_HDCP struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); -#endif + struct dm_crtc_state *dm_crtc_state = NULL; if (adev->dm.disable_hpd_irq) return; + if (dm_con_state->base.state && dm_con_state->base.crtc) + dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state( + dm_con_state->base.state, + dm_con_state->base.crtc)); /* * In case of failure or MST no need to update connector status or notify the OS * since (for MST case) MST does this in its own context. 
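The handle_hpd_rx_irq() rework further down defers automated-test and link-loss handling to a per-link offload queue, using an is_handling_link_loss flag guarded by offload_lock so that at most one link-loss work item is in flight per link. The standalone model below shows that claim/finish handshake; a pthread mutex stands in for the kernel spinlock, and the types are sketch stand-ins, not the driver's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-link state, modeling hpd_rx_irq_offload_work_queue. */
struct offload_queue {
    pthread_mutex_t lock;     /* models offload_lock */
    bool handling_link_loss;  /* models is_handling_link_loss */
};

/* IRQ side: claim the slot; only the claimant queues offload work. */
static bool try_claim(struct offload_queue *q)
{
    bool skip;

    pthread_mutex_lock(&q->lock);
    skip = q->handling_link_loss;
    if (!skip)
        q->handling_link_loss = true;
    pthread_mutex_unlock(&q->lock);

    return !skip;
}

/* Worker side: release the slot once the link loss is handled. */
static void finish(struct offload_queue *q)
{
    pthread_mutex_lock(&q->lock);
    q->handling_link_loss = false;
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct offload_queue q = { PTHREAD_MUTEX_INITIALIZER, false };

    printf("first IRQ queues work: %d\n", try_claim(&q));  /* 1 */
    printf("second IRQ is skipped: %d\n", try_claim(&q));  /* 0 */
    finish(&q);
    printf("after worker finished: %d\n", try_claim(&q));  /* 1 */
    return 0;
}

In the driver, try_claim() corresponds to the spin_lock section of handle_hpd_rx_irq() and finish() to dm_handle_hpd_rx_offload_work() clearing the flag after dc_link_dp_handle_link_loss() returns.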
@@ -2646,18 +3019,18 @@ static void handle_hpd_irq(void *param) if (aconnector->base.force && new_connection_type == dc_connection_none) { emulated_link_detect(aconnector->dc_link); - drm_modeset_lock_all(dev); dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { if (new_connection_type == dc_connection_none && - aconnector->dc_link->type == dc_connection_none) - dm_set_dpms_off(aconnector->dc_link); + aconnector->dc_link->type == dc_connection_none && + dm_crtc_state) + dm_set_dpms_off(aconnector->dc_link, dm_crtc_state); amdgpu_dm_update_connector_after_detect(aconnector); @@ -2666,13 +3039,21 @@ static void handle_hpd_irq(void *param) drm_modeset_unlock_all(dev); if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } mutex_unlock(&aconnector->hpd_lock); } -static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) +static void handle_hpd_irq(void *param) +{ + struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; + + handle_hpd_irq_helper(aconnector); + +} + +static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) { uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; uint8_t dret; @@ -2750,6 +3131,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); } +static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, + union hpd_irq_data hpd_irq_data) +{ + struct hpd_rx_irq_offload_work *offload_work = + kzalloc(sizeof(*offload_work), GFP_KERNEL); + + if (!offload_work) { + DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); + return; + } + + INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); + offload_work->data = hpd_irq_data; + offload_work->offload_wq = offload_wq; + + queue_work(offload_wq->wq, &offload_work->work); + DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); +} + static void handle_hpd_rx_irq(void *param) { struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; @@ -2761,14 +3161,16 @@ static void handle_hpd_rx_irq(void *param) enum dc_connection_type new_connection_type = dc_connection_none; struct amdgpu_device *adev = drm_to_adev(dev); union hpd_irq_data hpd_irq_data; - bool lock_flag = 0; + bool link_loss = false; + bool has_left_work = false; + int idx = aconnector->base.index; + struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); if (adev->dm.disable_hpd_irq) return; - /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio * conflict, after implement i2c helper, this mutex should be @@ -2776,43 +3178,41 @@ static void handle_hpd_rx_irq(void *param) */ mutex_lock(&aconnector->hpd_lock); - read_hpd_rx_irq_data(dc_link, &hpd_irq_data); + result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, + &link_loss, true, &has_left_work); - if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || - (dc_link->type == dc_connection_mst_branch)) { - if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { - result = true; - dm_handle_hpd_rx_irq(aconnector); - goto out; - } else if 
(hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { - result = false; - dm_handle_hpd_rx_irq(aconnector); + if (!has_left_work) + goto out; + + if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + goto out; + } + + if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { + if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || + hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + dm_handle_mst_sideband_msg(aconnector); goto out; } - } - /* - * TODO: We need the lock to avoid touching DC state while it's being - * modified during automated compliance testing, or when link loss - * happens. While this should be split into subhandlers and proper - * interfaces to avoid having to conditionally lock like this in the - * outer layer, we need this workaround temporarily to allow MST - * lightup in some scenarios to avoid timeout. - */ - if (!amdgpu_in_reset(adev) && - (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) || - hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) { - mutex_lock(&adev->dm.dc_lock); - lock_flag = 1; - } + if (link_loss) { + bool skip = false; -#ifdef CONFIG_DRM_AMD_DC_HDCP - result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL); -#else - result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL); -#endif - if (!amdgpu_in_reset(adev) && lock_flag) - mutex_unlock(&adev->dm.dc_lock); + spin_lock(&offload_wq->offload_lock); + skip = offload_wq->is_handling_link_loss; + + if (!skip) + offload_wq->is_handling_link_loss = true; + + spin_unlock(&offload_wq->offload_lock); + + if (!skip) + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + + goto out; + } + } out: if (result && !is_mst_root_connector) { @@ -2833,7 +3233,7 @@ out: dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { if (aconnector->fake_enable) @@ -2846,7 +3246,7 @@ out: dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } } #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -2897,6 +3297,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev) amdgpu_dm_irq_register_interrupt(adev, &int_params, handle_hpd_rx_irq, (void *) aconnector); + + if (adev->dm.hpd_rx_offload_wq) + adev->dm.hpd_rx_offload_wq[connector->index].aconnector = + aconnector; } } } @@ -2994,7 +3398,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) int i; unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; - if (adev->asic_type >= CHIP_VEGA10) + if (adev->family >= AMDGPU_FAMILY_AI) client_id = SOC15_IH_CLIENTID_DCE; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -3517,6 +3921,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, caps = dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; + /* update scratch register */ + if (bl_idx == 0) + amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -3711,6 +4118,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) int32_t primary_planes; enum dc_connection_type new_connection_type = dc_connection_none; const struct dc_plane_cap *plane; + bool 
psr_feature_enabled = false; dm->display_indexes_num = dm->dc->caps.max_streams; /* Update the actual used number of crtc */ @@ -3779,18 +4187,32 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) #if defined(CONFIG_DRM_AMD_DC_DCN) /* Use Outbox interrupt */ - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_YELLOW_CARP: - case CHIP_RENOIR: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + case IP_VERSION(2, 1, 0): if (register_outbox_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; } break; default: - DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type); + DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", + adev->ip_versions[DCE_HWIP][0]); + } + + /* Determine whether to enable PSR support by default. */ + if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + psr_feature_enabled = true; + break; + default: + psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; + break; + } } #endif @@ -3835,13 +4257,23 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); - if (amdgpu_dc_feature_mask & DC_PSR_MASK) + if (dm->num_of_edps) + update_connector_ext_caps(aconnector); + if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); } } + /* + * Disable vblank IRQs aggressively for power-saving. + * + * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR + * is also supported. + */ + adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled; + /* Software is initialized. Now we can register interrupt handlers. 
*/ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -3876,27 +4308,33 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } break; + default: #if defined(CONFIG_DRM_AMD_DC_DCN) - case CHIP_RAVEN: - case CHIP_NAVI12: - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_RENOIR: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: - if (dcn10_register_irq_handlers(dm->adev)) { - DRM_ERROR("DM: Failed to initialize IRQ\n"); + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 0, 3): + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + if (dcn10_register_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + goto fail; + } + break; + default: + DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", + adev->ip_versions[DCE_HWIP][0]); goto fail; } - break; #endif - default: - DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); - goto fail; + break; } return 0; @@ -4043,42 +4481,44 @@ static int dm_early_init(void *handle) adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; + default: #if defined(CONFIG_DRM_AMD_DC_DCN) - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_VANGOGH: - adev->mode_info.num_crtc = 4; - adev->mode_info.num_hpd = 4; - adev->mode_info.num_dig = 4; - break; - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - adev->mode_info.num_crtc = 6; - adev->mode_info.num_hpd = 6; - adev->mode_info.num_dig = 6; - break; - case CHIP_YELLOW_CARP: - adev->mode_info.num_crtc = 4; - adev->mode_info.num_hpd = 4; - adev->mode_info.num_dig = 4; - break; - case CHIP_NAVI14: - case CHIP_DIMGREY_CAVEFISH: - adev->mode_info.num_crtc = 5; - adev->mode_info.num_hpd = 5; - adev->mode_info.num_dig = 5; - break; - case CHIP_BEIGE_GOBY: - adev->mode_info.num_crtc = 2; - adev->mode_info.num_hpd = 2; - adev->mode_info.num_dig = 2; - break; + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 0, 2): + case IP_VERSION(3, 0, 0): + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case IP_VERSION(2, 0, 0): + case IP_VERSION(3, 0, 2): + adev->mode_info.num_crtc = 5; + adev->mode_info.num_hpd = 5; + adev->mode_info.num_dig = 5; + break; + case IP_VERSION(2, 0, 3): + case IP_VERSION(3, 0, 3): + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 2; + adev->mode_info.num_dig = 2; + break; + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(3, 0, 1): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + adev->mode_info.num_crtc = 4; + adev->mode_info.num_hpd = 4; + adev->mode_info.num_dig = 4; + break; + default: + DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", + adev->ip_versions[DCE_HWIP][0]); + return -EINVAL; + } #endif - default: - DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); - return -EINVAL; + break; } amdgpu_dm_set_irq_funcs(adev); @@ -4166,7 +4606,8 @@ static void get_min_max_dc_plane_scaling(struct drm_device *dev, } -static int fill_dc_scaling_info(const struct drm_plane_state *state, +static int fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state 
*state, struct dc_scaling_info *scaling_info) { int scale_w, scale_h, min_downscale, max_upscale; @@ -4180,7 +4621,8 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, /* * For reasons we don't (yet) fully understand a non-zero * src_y coordinate into an NV12 buffer can cause a - * system hang. To avoid hangs (and maybe be overly cautious) + * system hang on DCN1x. + * To avoid hangs (and maybe be overly cautious) * let's reject both non-zero src_x and src_y. * * We currently know of only one use-case to reproduce a @@ -4188,10 +4630,10 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state, * is to gesture the YouTube Android app into full screen * on ChromeOS. */ - if (state->fb && - state->fb->format->format == DRM_FORMAT_NV12 && - (scaling_info->src_rect.x != 0 || - scaling_info->src_rect.y != 0)) + if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) return -EINVAL; scaling_info->src_rect.width = state->src_w >> 16; @@ -4297,12 +4739,7 @@ fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, tiling_info->gfx9.num_rb_per_se = adev->gfx.config.gb_addr_config_fields.num_rb_per_se; tiling_info->gfx9.shaderEnable = 1; - if (adev->asic_type == CHIP_SIENNA_CICHLID || - adev->asic_type == CHIP_NAVY_FLOUNDER || - adev->asic_type == CHIP_DIMGREY_CAVEFISH || - adev->asic_type == CHIP_BEIGE_GOBY || - adev->asic_type == CHIP_YELLOW_CARP || - adev->asic_type == CHIP_VANGOGH) + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; } @@ -4668,6 +5105,16 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev, AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs) | AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | AMD_FMT_MOD_SET(DCC_RETILE, 1) | AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | @@ -4678,6 +5125,17 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev, AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | AMD_FMT_MOD_SET(PACKERS, pkrs)); add_modifier(mods, size, capacity, AMD_FMT_MOD | @@ -4722,7 +5180,7 @@ get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, u case AMDGPU_FAMILY_NV: case AMDGPU_FAMILY_VGH: case AMDGPU_FAMILY_YC: - if 
(adev->asic_type >= CHIP_SIENNA_CICHLID) + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) add_gfx10_3_modifiers(adev, mods, &size, &capacity); else add_gfx10_1_modifiers(adev, mods, &size, &capacity); @@ -4759,10 +5217,27 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, if (modifier_has_dcc(modifier) && !force_disable_dcc) { uint64_t dcc_address = afb->address + afb->base.offsets[1]; + bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); + bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); dcc->enable = 1; dcc->meta_pitch = afb->base.pitches[1]; - dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); + dcc->independent_64b_blks = independent_64b_blks; + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { + if (independent_64b_blks && independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; + else if (independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_128b; + else if (independent_64b_blks && !independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } else { + if (independent_64b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } address->grph.meta_addr.low_part = lower_32_bits(dcc_address); address->grph.meta_addr.high_part = upper_32_bits(dcc_address); @@ -5064,7 +5539,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, int ret; bool force_disable_dcc = false; - ret = fill_dc_scaling_info(plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, plane_state, &scaling_info); if (ret) return ret; @@ -5584,7 +6059,8 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, { stream->timing.flags.DSC = 0; - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, @@ -5592,25 +6068,95 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, } } +static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps, + uint32_t max_dsc_target_bpp_limit_override) +{ + const struct dc_link_settings *verified_link_cap = NULL; + uint32_t link_bw_in_kbps; + uint32_t edp_min_bpp_x16, edp_max_bpp_x16; + struct dc *dc = sink->ctx->dc; + struct dc_dsc_bw_range bw_range = {0}; + struct dc_dsc_config dsc_cfg = {0}; + + verified_link_cap = dc_link_get_link_cap(stream->link); + link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); + edp_min_bpp_x16 = 8 * 16; + edp_max_bpp_x16 = 8 * 16; + + if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) + edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; + + if (edp_max_bpp_x16 < edp_min_bpp_x16) + edp_min_bpp_x16 = edp_max_bpp_x16; + + if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], + dc->debug.dsc_min_slice_height_override, + edp_min_bpp_x16, edp_max_bpp_x16, + dsc_caps, + &stream->timing, + &bw_range)) { + + if (bw_range.max_kbps < link_bw_in_kbps) { + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + dc->debug.dsc_min_slice_height_override, + 
max_dsc_target_bpp_limit_override, + 0, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; + } + return; + } + } + + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + link_bw_in_kbps, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + } +} + static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) { struct drm_connector *drm_connector = &aconnector->base; uint32_t link_bandwidth_kbps; + uint32_t max_dsc_target_bpp_limit_override = 0; + struct dc *dc = sink->ctx->dc; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); + + if (stream->link && stream->link->local_sink) + max_dsc_target_bpp_limit_override = + stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; + /* Set DSC policy according to dsc_clock_en */ dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp && + dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { + + apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); + + } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, - 0, + max_dsc_target_bpp_limit_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) { @@ -5632,7 +6178,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; } -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ /** * DOC: FreeSync Video @@ -5650,7 +6196,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, * - Cinema HFR (48 FPS) * - TV/PAL (50 FPS) * - Commonly used (60 FPS) - * - Multiples of 24 (48,72,96 FPS) + * - Multiples of 24 (48,72,96,120 FPS) * * The list of standards video format is not huge and can be added to the * connector modeset list beforehand. 
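As a side note on the rate list above: choosing which fixed refresh rates to expose reduces to filtering the common_rates[] table (shown in the add_fs_modes() hunk below, now including 120 Hz) against the panel's FreeSync range. A minimal plain-C sketch of that filter follows; the real add_fs_modes() applies further checks against the preferred mode, and the 48-120 Hz range here is a made-up example.

#include <stdint.h>
#include <stdio.h>

/* Same values as common_rates[] below, in mHz, including the new 120000. */
static const uint32_t common_rates[] = {
    23976, 24000, 25000, 29970, 30000,
    48000, 50000, 60000, 72000, 96000, 120000,
};

int main(void)
{
    /* Hypothetical 48-120 Hz FreeSync panel. */
    uint32_t min_mhz = 48000, max_mhz = 120000;
    size_t i;

    for (i = 0; i < sizeof(common_rates) / sizeof(common_rates[0]); i++) {
        if (common_rates[i] >= min_mhz && common_rates[i] <= max_mhz)
            printf("can add fixed-rate mode: %u.%03u Hz\n",
                   common_rates[i] / 1000, common_rates[i] % 1000);
    }
    return 0;
}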
With that, userspace can leverage @@ -5961,6 +6507,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) state->freesync_config = cur->freesync_config; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; + state->force_dpms_off = cur->force_dpms_off; /* TODO Duplicate dc_stream after objects are stream object is flattened */ return &state->base; @@ -6024,21 +6571,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) return 0; #if defined(CONFIG_DRM_AMD_DC_DCN) - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return -ENOMEM; + if (dm->vblank_control_workqueue) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return -ENOMEM; - INIT_WORK(&work->work, vblank_control_worker); - work->dm = dm; - work->acrtc = acrtc; - work->enable = enable; + INIT_WORK(&work->work, vblank_control_worker); + work->dm = dm; + work->acrtc = acrtc; + work->enable = enable; - if (acrtc_state->stream) { - dc_stream_retain(acrtc_state->stream); - work->stream = acrtc_state->stream; - } + if (acrtc_state->stream) { + dc_stream_retain(acrtc_state->stream); + work->stream = acrtc_state->stream; + } - queue_work(dm->vblank_control_workqueue, &work->work); + queue_work(dm->vblank_control_workqueue, &work->work); + } #endif return 0; @@ -6792,15 +7341,16 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { #if defined(CONFIG_DRM_AMD_DC_DCN) static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, - struct dc_state *dc_state) + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { struct dc_stream_state *stream = NULL; struct drm_connector *connector; struct drm_connector_state *new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j, clock, bpp; - int vcpi, pbn_div, pbn = 0; + int i, j; + int vcpi, pbn_div, pbn, slot_num = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -6828,7 +7378,24 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, if (!stream) continue; + pbn_div = dm_mst_get_pbn_divider(stream->link); + /* pbn is calculated by compute_mst_dsc_configs_for_state*/ + for (j = 0; j < dc_state->stream_count; j++) { + if (vars[j].aconnector == aconnector) { + pbn = vars[j].pbn; + break; + } + } + + if (j == dc_state->stream_count) + continue; + + slot_num = DIV_ROUND_UP(pbn, pbn_div); + if (stream->timing.flags.DSC != 1) { + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = slot_num; + drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn, @@ -6837,10 +7404,6 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, continue; } - pbn_div = dm_mst_get_pbn_divider(stream->link); - bpp = stream->timing.dsc_cfg.bits_per_pixel; - clock = stream->timing.pix_clk_100hz / 10; - pbn = drm_dp_calc_pbn_mode(clock, bpp, true); vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, pbn_div, @@ -7104,7 +7667,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - ret = fill_dc_scaling_info(new_plane_state, &scaling_info); + ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); if (ret) return ret; @@ -7519,6 +8082,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, } } +static void amdgpu_set_panel_orientation(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + const struct drm_display_mode *native_mode; + 
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && + connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + return; + + encoder = amdgpu_dm_connector_to_encoder(connector); + if (!encoder) + return; + + amdgpu_encoder = to_amdgpu_encoder(encoder); + + native_mode = &amdgpu_encoder->native_mode; + if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) + return; + + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + native_mode->hdisplay, + native_mode->vdisplay); +} + static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, struct edid *edid) { @@ -7547,6 +8136,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, * restored here. */ amdgpu_dm_update_freesync_caps(connector, edid); + + amdgpu_set_panel_orientation(connector); } else { amdgpu_dm_connector->num_modes = 0; } @@ -7574,19 +8165,19 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) /* Standard FPS values * - * 23.976 - TV/NTSC - * 24 - Cinema - * 25 - TV/PAL - * 29.97 - TV/NTSC - * 30 - TV/NTSC - * 48 - Cinema HFR - * 50 - TV/PAL - * 60 - Commonly used - * 48,72,96 - Multiples of 24 + * 23.976 - TV/NTSC + * 24 - Cinema + * 25 - TV/PAL + * 29.97 - TV/NTSC + * 30 - TV/NTSC + * 48 - Cinema HFR + * 50 - TV/PAL + * 60 - Commonly used + * 48,72,96,120 - Multiples of 24 */ static const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000, - 48000, 50000, 60000, 72000, 96000 + 48000, 50000, 60000, 72000, 96000, 120000 }; /* @@ -7714,7 +8305,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, break; case DRM_MODE_CONNECTOR_DisplayPort: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; - aconnector->base.ycbcr_420_allowed = + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link->link_enc = + link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + if (!link->link_enc) + link->link_enc = + link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); + } + + if (link->link_enc) + aconnector->base.ycbcr_420_allowed = link->link_enc->features.dp_ycbcr420_supported ? true : false; break; case DRM_MODE_CONNECTOR_DVID: @@ -7829,7 +8430,8 @@ create_i2c(struct ddc_service *ddc_service, snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); i2c_set_adapdata(&i2c->base, i2c); i2c->ddc_service = ddc_service; - i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; + if (i2c->ddc_service->ddc_pin) + i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; return i2c; } @@ -8058,8 +8660,26 @@ static bool is_content_protection_different(struct drm_connector_state *state, state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; - /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled - * hot-plug, headless s3, dpms + /* Stream removed and re-enabled + * + * Can sometimes overlap with the HPD case, + * thus set update_hdcp to false to avoid + * setting HDCP multiple times. + * + * Handles: DESIRED -> DESIRED (Special case) + */ + if (!(old_state->crtc && old_state->crtc->enabled) && + state->crtc && state->crtc->enabled && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + dm_con_state->update_hdcp = false; + return true; + } + + /* Hot-plug, headless s3, dpms + * + * Only start HDCP if the display is connected/enabled. 
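A condensed model of the "stream removed and re-enabled" special case added just above: if the CRTC goes from disabled to enabled while userspace still requests DESIRED, HDCP has to be started again, and update_hdcp is cleared so an overlapping hot-plug does not start it a second time. This is a plain-C sketch; the enum and struct are stand-ins for the DRM connector state, not driver types.

#include <stdbool.h>
#include <stdio.h>

enum cp { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

struct conn_state {
    bool crtc_enabled;
    enum cp content_protection;
};

/* Handles: DESIRED -> DESIRED when the stream was removed and re-enabled. */
static bool needs_hdcp_restart(const struct conn_state *old,
                               const struct conn_state *new,
                               bool *update_hdcp)
{
    if (!old->crtc_enabled && new->crtc_enabled &&
        new->content_protection == CP_DESIRED) {
        *update_hdcp = false;  /* avoid a second start from the HPD path */
        return true;
    }
    return false;
}

int main(void)
{
    struct conn_state old = { false, CP_DESIRED };
    struct conn_state new = { true, CP_DESIRED };
    bool update_hdcp = true;

    printf("restart HDCP: %d, update_hdcp now: %d\n",
           needs_hdcp_restart(&old, &new, &update_hdcp), update_hdcp);
    return 0;
}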
+ * update_hdcp flag will be set to false until the next + * HPD comes in. * * Handles: DESIRED -> DESIRED (Special case) */ @@ -8495,7 +9115,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; } - fill_dc_scaling_info(new_plane_state, + fill_dc_scaling_info(dm->adev, new_plane_state, &bundle->scaling_infos[planes_count]); bundle->surface_updates[planes_count].scaling_info = @@ -8622,7 +9242,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * and rely on sending it from software. */ if (acrtc_attach->base.state->event && - acrtc_state->active_planes > 0) { + acrtc_state->active_planes > 0 && + !acrtc_state->force_dpms_off) { drm_crtc_vblank_get(pcrtc); spin_lock_irqsave(&pcrtc->dev->event_lock, flags); @@ -8648,7 +9269,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * If PSR or idle optimizations are enabled then flush out * any pending work before hardware programming. */ - flush_workqueue(dm->vblank_control_workqueue); + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); #endif bundle->stream_update.stream = acrtc_state->stream; @@ -8983,7 +9605,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* if there mode set or reset, disable eDP PSR */ if (mode_set_reset_required) { #if defined(CONFIG_DRM_AMD_DC_DCN) - flush_workqueue(dm->vblank_control_workqueue); + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); #endif amdgpu_dm_psr_disable_all(dm); } @@ -10097,18 +10720,18 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, struct drm_crtc *crtc, struct drm_crtc_state *new_crtc_state) { - struct drm_plane_state *new_cursor_state, *new_primary_state; - int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h; + struct drm_plane *cursor = crtc->cursor, *underlying; + struct drm_plane_state *new_cursor_state, *new_underlying_state; + int i; + int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; /* On DCE and DCN there is no dedicated hardware cursor plane. We get a * cursor per pipe but it's going to inherit the scaling and * positioning from the underlying pipe. Check the cursor plane's - * blending properties match the primary plane's. */ + * blending properties match the underlying planes'. 
*/ - new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor); - new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary); - if (!new_cursor_state || !new_primary_state || - !new_cursor_state->fb || !new_primary_state->fb) { + new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); + if (!new_cursor_state || !new_cursor_state->fb) { return 0; } @@ -10117,15 +10740,34 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, cursor_scale_h = new_cursor_state->crtc_h * 1000 / (new_cursor_state->src_h >> 16); - primary_scale_w = new_primary_state->crtc_w * 1000 / - (new_primary_state->src_w >> 16); - primary_scale_h = new_primary_state->crtc_h * 1000 / - (new_primary_state->src_h >> 16); + for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { + /* Narrow down to non-cursor planes on the same CRTC as the cursor */ + if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) + continue; - if (cursor_scale_w != primary_scale_w || - cursor_scale_h != primary_scale_h) { - drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n"); - return -EINVAL; + /* Ignore disabled planes */ + if (!new_underlying_state->fb) + continue; + + underlying_scale_w = new_underlying_state->crtc_w * 1000 / + (new_underlying_state->src_w >> 16); + underlying_scale_h = new_underlying_state->crtc_h * 1000 / + (new_underlying_state->src_h >> 16); + + if (cursor_scale_w != underlying_scale_w || + cursor_scale_h != underlying_scale_h) { + drm_dbg_atomic(crtc->dev, + "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", + cursor->base.id, cursor->name, underlying->base.id, underlying->name); + return -EINVAL; + } + + /* If this plane covers the whole CRTC, no need to check planes underneath */ + if (new_underlying_state->crtc_x <= 0 && + new_underlying_state->crtc_y <= 0 && + new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && + new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) + break; } return 0; @@ -10156,53 +10798,6 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm } #endif -static int validate_overlay(struct drm_atomic_state *state) -{ - int i; - struct drm_plane *plane; - struct drm_plane_state *new_plane_state; - struct drm_plane_state *primary_state, *overlay_state = NULL; - - /* Check if primary plane is contained inside overlay */ - for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) { - if (plane->type == DRM_PLANE_TYPE_OVERLAY) { - if (drm_atomic_plane_disabling(plane->state, new_plane_state)) - return 0; - - overlay_state = new_plane_state; - continue; - } - } - - /* check if we're making changes to the overlay plane */ - if (!overlay_state) - return 0; - - /* check if overlay plane is enabled */ - if (!overlay_state->crtc) - return 0; - - /* find the primary plane for the CRTC that the overlay is enabled on */ - primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary); - if (IS_ERR(primary_state)) - return PTR_ERR(primary_state); - - /* check if primary plane is enabled */ - if (!primary_state->crtc) - return 0; - - /* Perform the bounds check to ensure the overlay plane covers the primary */ - if (primary_state->crtc_x < overlay_state->crtc_x || - primary_state->crtc_y < overlay_state->crtc_y || - primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w || - primary_state->crtc_y + 
primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) { - DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n"); - return -EINVAL; - } - - return 0; -} - /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. * @dev: The DRM device @@ -10243,12 +10838,19 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int ret, i; bool lock_and_validation_needed = false; struct dm_crtc_state *dm_old_crtc_state; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dsc_mst_fairness_vars vars[MAX_PIPES]; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_topology_mgr *mgr; +#endif trace_amdgpu_dm_atomic_check_begin(state); ret = drm_atomic_helper_check_modeset(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); goto fail; + } /* Check connector changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -10264,6 +10866,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); if (IS_ERR(new_crtc_state)) { + DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); ret = PTR_ERR(new_crtc_state); goto fail; } @@ -10278,8 +10881,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); goto fail; + } } } } @@ -10294,19 +10899,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, continue; ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); goto fail; + } if (!new_crtc_state->enable) continue; ret = drm_atomic_add_affected_connectors(state, crtc); - if (ret) - return ret; + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); + goto fail; + } ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); goto fail; + } if (dm_old_crtc_state->dsc_force_changed) new_crtc_state->mode_changed = true; @@ -10343,6 +10954,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (IS_ERR(new_plane_state)) { ret = PTR_ERR(new_plane_state); + DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); goto fail; } } @@ -10355,8 +10967,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } } /* Disable all crtcs which require disable */ @@ -10366,8 +10980,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); goto fail; + } } /* Enable all crtcs which require enable */ @@ -10377,14 +10993,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); goto fail; + } } - ret = validate_overlay(state); - if (ret) - goto fail; - /* Add new/modified planes */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { ret = 
dm_update_plane_state(dc, state, plane, @@ -10392,20 +11006,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } } /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); goto fail; + } /* Check cursor planes scaling */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); goto fail; + } } if (state->legacy_cursor_update) { @@ -10450,6 +11070,33 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* set the slot info for each mst_state based on the link encoding format */ + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + u8 link_coding_cap; + + if (!mgr->mst_state ) + continue; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + int id = connector->index; + + if (id == mst_state->mgr->conn_base_id) { + aconnector = to_amdgpu_dm_connector(connector); + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); + drm_dp_mst_update_slots(mst_state, link_coding_cap); + + break; + } + } + drm_connector_list_iter_end(&iter); + + } +#endif /** * Streams and planes are reset when there are changes that affect * bandwidth. Anything that affects bandwidth needs to go through @@ -10465,20 +11112,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ if (lock_and_validation_needed) { ret = dm_atomic_get_state(state, &dm_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); goto fail; + } ret = do_aquire_global_lock(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); goto fail; + } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) + if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); goto fail; + } - ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context); - if (ret) + ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); goto fail; + } #endif /* @@ -10488,11 +11143,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * to get stuck in an infinite loop and hang eventually. 
*/ ret = drm_dp_mst_atomic_check(state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); goto fail; - status = dc_validate_global_state(dc, dm_state->context, false); + } + status = dc_validate_global_state(dc, dm_state->context, true); if (status != DC_OK) { - DC_LOG_WARNING("DC global validation failure: %s (%d)", + DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; @@ -10756,6 +11413,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = NULL; + struct dc_sink *sink; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); @@ -10767,28 +11425,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, goto update; } - if (!edid) { + sink = amdgpu_dm_connector->dc_sink ? + amdgpu_dm_connector->dc_sink : + amdgpu_dm_connector->dc_em_sink; + + if (!edid || !sink) { dm_con_state = to_dm_connector_state(connector->state); amdgpu_dm_connector->min_vfreq = 0; amdgpu_dm_connector->max_vfreq = 0; amdgpu_dm_connector->pixel_clock_mhz = 0; + connector->display_info.monitor_range.min_vfreq = 0; + connector->display_info.monitor_range.max_vfreq = 0; + freesync_capable = false; goto update; } dm_con_state = to_dm_connector_state(connector->state); - if (!amdgpu_dm_connector->dc_sink) { - DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); - goto update; - } if (!adev->dm.freesync_module) goto update; - if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT - || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { + if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT + || sink->sink_signal == SIGNAL_TYPE_EDP) { bool edid_check_required = false; if (edid) { @@ -10835,7 +11496,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, freesync_capable = true; } } - } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); if (i >= 0 && vsdb_info.freesync_supported) { timing = &edid->detailed_timings[i]; @@ -10917,29 +11578,96 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, return value; } -int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex, - struct aux_payload *payload, enum aux_return_code_type *operation_result) +int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx, + uint8_t status_type, uint32_t *operation_result) +{ + struct amdgpu_device *adev = ctx->driver_context; + int return_status = -1; + struct dmub_notification *p_notify = adev->dm.dmub_notify; + + if (is_cmd_aux) { + if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { + return_status = p_notify->aux_reply.length; + *operation_result = p_notify->result; + } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) { + *operation_result = AUX_RET_ERROR_TIMEOUT; + } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) { + *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; + } else { + *operation_result = AUX_RET_ERROR_UNKNOWN; + } + } else { + if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { + return_status = 0; + *operation_result = p_notify->sc_status; + } else { + *operation_result = SET_CONFIG_UNKNOWN_ERROR; + } + } + + 
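/*
 * [Editorial sketch, not part of the patch] The status-mapping helper
 * above and amdgpu_dm_process_dmub_aux_transfer_sync() below form an
 * async-to-sync bridge: a DMUB command is fired asynchronously, the
 * caller blocks on a completion that the notification path signals,
 * and the outcome is translated into DMUB_ASYNC_TO_SYNC_ACCESS_*
 * codes. Reduced to its core (hypothetical names; the AUX vs.
 * SET_CONFIG multiplexing is omitted):
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct sync_ctx {
	struct completion done;	/* signalled by the notification handler */
	int result;		/* status written before complete() */
};

static int fire_and_wait(struct sync_ctx *ctx, void (*fire)(struct sync_ctx *))
{
	reinit_completion(&ctx->done);
	fire(ctx);		/* asynchronous submit; returns immediately */

	/* same ten-second ceiling the patch uses (10 * HZ) */
	if (!wait_for_completion_timeout(&ctx->done, 10 * HZ))
		return -ETIMEDOUT;

	return ctx->result;
}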
return return_status; +} + +int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, + unsigned int link_index, void *cmd_payload, void *operation_result) { struct amdgpu_device *adev = ctx->driver_context; int ret = 0; - dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload); - ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ); + if (is_cmd_aux) { + dc_process_dmub_aux_transfer_async(ctx->dc, + link_index, (struct aux_payload *)cmd_payload); + } else if (dc_process_dmub_set_config_async(ctx->dc, link_index, + (struct set_config_cmd_payload *)cmd_payload, + adev->dm.dmub_notify)) { + return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, + ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, + (uint32_t *)operation_result); + } + + ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ); if (ret == 0) { - *operation_result = AUX_RET_ERROR_TIMEOUT; - return -1; + DRM_ERROR("wait_for_completion_timeout timeout!"); + return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, + ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT, + (uint32_t *)operation_result); } - *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result; - if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) { - (*payload->reply) = adev->dm.dmub_notify->aux_reply.command; + if (is_cmd_aux) { + if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) { + struct aux_payload *payload = (struct aux_payload *)cmd_payload; - // For read case, Copy data to payload - if (!payload->write && adev->dm.dmub_notify->aux_reply.length && - (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK)) - memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, - adev->dm.dmub_notify->aux_reply.length); + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; + if (!payload->write && adev->dm.dmub_notify->aux_reply.length && + payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) { + memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, + adev->dm.dmub_notify->aux_reply.length); + } + } + } + + return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, + ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, + (uint32_t *)operation_result); +} + +/* + * Check whether seamless boot is supported. + * + * So far we only support seamless boot on CHIP_VANGOGH. + * If everything goes well, we may consider expanding + * seamless boot to other ASICs. 
+ */ +bool check_seamless_boot_capability(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_VANGOGH: + if (!adev->mman.keep_stolen_vga_memory) + return true; + break; + default: + break; } - return adev->dm.dmub_notify->aux_reply.length; + return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d1d353a7c77d..bb65f41d1a59 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -47,6 +47,15 @@ #define AMDGPU_DM_MAX_CRTC 6 #define AMDGPU_DM_MAX_NUM_EDP 2 + +#define AMDGPU_DMUB_NOTIFICATION_MAX 5 + +/** + * DMUB Async to Sync Mechanism Status + **/ +#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 +#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 +#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 /* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" @@ -86,6 +95,21 @@ struct dm_compressor_info { uint64_t gpu_addr; }; +typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify); + +/** + * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ + * + * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq + * @dmub_notify: notification for callback function + * @adev: amdgpu_device pointer + */ +struct dmub_hpd_work { + struct work_struct handle_hpd_work; + struct dmub_notification *dmub_notify; + struct amdgpu_device *adev; +}; + /** * struct vblank_control_work - Work data for vblank control * @work: Kernel work data for the work event @@ -155,6 +179,48 @@ struct dal_allocation { }; /** + * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq + * offload work + */ +struct hpd_rx_irq_offload_work_queue { + /** + * @wq: workqueue structure to queue offload work. + */ + struct workqueue_struct *wq; + /** + * @offload_lock: To protect fields of offload work queue. + */ + spinlock_t offload_lock; + /** + * @is_handling_link_loss: Used to prevent inserting link loss event when + * we're handling link loss + */ + bool is_handling_link_loss; + /** + * @aconnector: The aconnector that this work queue is attached to + */ + struct amdgpu_dm_connector *aconnector; +}; + +/** + * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure + */ +struct hpd_rx_irq_offload_work { + /** + * @work: offload work + */ + struct work_struct work; + /** + * @data: reference irq data which is used while handling offload work + */ + union hpd_irq_data data; + /** + * @offload_wq: offload work queue that this work is queued to + */ + struct hpd_rx_irq_offload_work_queue *offload_wq; +}; + +/** * struct amdgpu_display_manager - Central amdgpu display manager device * * @dc: Display Core control structure @@ -190,9 +256,31 @@ struct amdgpu_display_manager { */ struct dmub_srv *dmub_srv; + /** + * @dmub_notify: + * + * Notification from DMUB. + */ + struct dmub_notification *dmub_notify; /** + * @dmub_callback: + * + * Callback functions to handle notification from DMUB. + */ + + dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX]; + + /** + * @dmub_thread_offload: + * + * Flag to indicate if callback is offload. + */ + + bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX]; + + /** * @dmub_fb_info: * * Framebuffer regions for the DMUB. 
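Editorial note: the amdgpu_dm.h additions above wire DMUB notifications to a small dispatch table — one handler per notification type in dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX], with a parallel dmub_thread_offload[] flag that chooses between running the handler inline in the interrupt path and bouncing it to a workqueue via a dmub_hpd_work item (the struct's own comment calls out "time consuming work in low priority outbox IRQ"). A compact sketch of that shape; every name below is a hypothetical stand-in, not the driver's API:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#define NOTIFY_MAX 5			/* mirrors AMDGPU_DMUB_NOTIFICATION_MAX */

struct notification { unsigned int type; /* ... payload ... */ };
typedef void (*notify_cb_t)(struct notification *n);

struct deferred_notify {		/* plays the role of dmub_hpd_work */
	struct work_struct work;
	struct notification n;		/* copied: IRQ-owned data may vanish */
	notify_cb_t cb;
};

static void deferred_notify_fn(struct work_struct *work)
{
	struct deferred_notify *d =
		container_of(work, struct deferred_notify, work);

	d->cb(&d->n);
	kfree(d);
}

static void dispatch_notification(notify_cb_t cb[NOTIFY_MAX],
				  const bool offload[NOTIFY_MAX],
				  struct workqueue_struct *wq,
				  struct notification *n)
{
	struct deferred_notify *d;

	if (n->type >= NOTIFY_MAX || !cb[n->type])
		return;

	if (!offload[n->type]) {
		cb[n->type](n);		/* cheap handler: run inline */
		return;
	}

	d = kzalloc(sizeof(*d), GFP_ATOMIC);	/* likely in IRQ context */
	if (!d)
		return;

	d->n = *n;
	d->cb = cb[n->type];
	INIT_WORK(&d->work, deferred_notify_fn);
	queue_work(wq, &d->work);	/* heavy handler: defer to a thread */
}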
@@ -422,7 +510,12 @@ struct amdgpu_display_manager { */ struct crc_rd_work *crc_rd_wrk; #endif - + /** + * @hpd_rx_offload_wq: + * + * Work queue to offload works of hpd_rx_irq + */ + struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq; /** * @mst_encoders: * @@ -439,6 +532,7 @@ struct amdgpu_display_manager { */ struct list_head da_list; struct completion dmub_aux_transfer_done; + struct workqueue_struct *delayed_hpd_wq; /** * @brightness: @@ -542,6 +636,8 @@ struct dm_crtc_state { bool dsc_force_changed; bool vrr_supported; + + bool force_dpms_off; struct mod_freesync_config freesync_config; struct dc_info_packet vrr_infopacket; @@ -632,6 +728,10 @@ void amdgpu_dm_update_connector_after_detect( extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; -int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex, - struct aux_payload *payload, enum aux_return_code_type *operation_result); +int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, + struct dc_context *ctx, unsigned int link_index, + void *payload, void *operation_result); + +bool check_seamless_boot_capability(struct amdgpu_device *adev); + #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index cce062adc439..8a441a22c46e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) ret = -EINVAL; goto cleanup; } + + if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) && + (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) { + DRM_DEBUG_DRIVER("No DP connector available for CRC source\n"); + ret = -EINVAL; + goto cleanup; + } + } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 87daa78a32b8..0277685864c5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -78,12 +78,10 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size, wr_buf_ptr = wr_buf; - r = copy_from_user(wr_buf_ptr, buf, wr_buf_size); - - /* r is bytes not be copied */ - if (r >= wr_buf_size) { - DRM_DEBUG_DRIVER("user data not be read\n"); - return -EINVAL; + /* r is bytes not be copied */ + if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) { + DRM_DEBUG_DRIVER("user data could not be read successfully\n"); + return -EFAULT; } /* check number of parameters. 
isspace could not differ space and \n */ @@ -247,6 +245,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, { struct amdgpu_dm_connector *connector = file_inode(f)->i_private; struct dc_link *link = connector->dc_link; + struct dc *dc = (struct dc *)link->dc; struct dc_link_settings prefer_link_settings; char *wr_buf = NULL; const uint32_t wr_buf_size = 40; @@ -263,7 +262,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, if (!wr_buf) return -ENOSPC; - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -293,6 +292,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, case LINK_RATE_RBR2: case LINK_RATE_HIGH2: case LINK_RATE_HIGH3: +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_UHBR10: +#endif break; default: valid_input = false; @@ -313,7 +315,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, prefer_link_settings.lane_count = param[0]; prefer_link_settings.link_rate = param[1]; - dp_retrain_link_dp_test(link, &prefer_link_settings, false); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true); kfree(wr_buf); return size; @@ -378,9 +380,9 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, return -EINVAL; snprintf(rd_buf, rd_buf_size, " %d %d %d\n", - link->cur_lane_setting.VOLTAGE_SWING, - link->cur_lane_setting.PRE_EMPHASIS, - link->cur_lane_setting.POST_CURSOR2); + link->cur_lane_setting[0].VOLTAGE_SWING, + link->cur_lane_setting[0].PRE_EMPHASIS, + link->cur_lane_setting[0].POST_CURSOR2); while (size) { if (*pos >= rd_buf_size) @@ -487,7 +489,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf, if (!wr_buf) return -ENOSPC; - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -639,7 +641,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us if (!wr_buf) return -ENOSPC; - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -732,7 +734,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us } for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++) - link_training_settings.lane_settings[i] = link->cur_lane_setting; + link_training_settings.lane_settings[i] = link->cur_lane_setting[i]; dc_link_set_test_pattern( link, @@ -914,7 +916,7 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, ¶m, buf, max_param_num, ¶m_nums)) { @@ -1211,7 +1213,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -1241,7 +1243,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf, dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } else if (param[0] == 0) { if (!aconnector->dc_link) goto unlock; @@ -1263,7 +1265,7 @@ 
static ssize_t trigger_hotplug(struct file *f, const char __user *buf, dm_restore_drm_connector_state(dev, connector); drm_modeset_unlock_all(dev); - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); } unlock: @@ -1396,7 +1398,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -1581,7 +1583,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -1766,7 +1768,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -1944,7 +1946,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { @@ -2382,7 +2384,7 @@ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf, return -ENOSPC; } - if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index c5f1dc3b5961..5bfdc66b5867 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -448,6 +448,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct drm_connector_state *conn_state; + struct dc_sink *sink = NULL; + bool link_is_hdcp14 = false; if (config->dpms_off) { hdcp_remove_display(hdcp_work, link_index, aconnector); @@ -460,8 +462,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->index = aconnector->base.index; display->state = MOD_HDCP_DISPLAY_ACTIVE; - if (aconnector->dc_sink != NULL) - link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); + if (aconnector->dc_sink) + sink = aconnector->dc_sink; + else if (aconnector->dc_em_sink) + sink = aconnector->dc_em_sink; + + if (sink != NULL) + link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; display->dig_fe = config->dig_fe; @@ -470,8 +477,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->stream_enc_idx = config->stream_enc_idx; link->link_enc_idx = config->link_enc_idx; link->phy_idx = config->phy_idx; - link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link, - aconnector->dc_sink->sink_signal) ? 
1 : 0; + if (sink) + link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal); + link->hdcp_supported_informational = link_is_hdcp14; link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.assr_enabled = config->assr_enabled; link->dp.mst_enabled = config->mst_enabled; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 6fee12c91ef5..72a2e84645df 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -40,6 +40,39 @@ #include "dm_helpers.h" +struct monitor_patch_info { + unsigned int manufacturer_id; + unsigned int product_id; + void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param); + unsigned int patch_param; +}; +static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param); + +static const struct monitor_patch_info monitor_patch_table[] = { +{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15}, +{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15}, +}; + +static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param) +{ + if (edid_caps) + edid_caps->panel_patch.max_dsc_target_bpp_limit = param; +} + +static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++) + if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id) + && (edid_caps->product_id == monitor_patch_table[i].product_id)) { + monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param); + ret++; + } + + return ret; +} + /* dm_helpers_parse_edid_caps * * Parse edid caps @@ -125,6 +158,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps( kfree(sads); kfree(sadb); + amdgpu_dm_patch_edid_caps(edid_caps); + return result; } @@ -184,6 +219,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; bool ret; + u8 link_coding_cap = DP_8b_10b_ENCODING; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; /* Accessing the connector state is required for vcpi_slots allocation @@ -203,6 +239,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( mst_port = aconnector->port; +#if defined(CONFIG_DRM_AMD_DC_DCN) + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); +#endif + if (enable) { ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, @@ -216,7 +256,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( } /* It's OK for this to fail */ - drm_dp_update_payload_part1(mst_mgr); + drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each @@ -544,7 +584,7 @@ bool dm_helpers_dp_write_dsc_enable( ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); } - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? 
"enable" : "disable"); } @@ -648,8 +688,21 @@ int dm_helper_dmub_aux_transfer_sync( struct aux_payload *payload, enum aux_return_code_type *operation_result) { - return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, operation_result); + return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, + link->link_index, (void *)payload, + (void *)operation_result); } + +int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, + const struct dc_link *link, + struct set_config_cmd_payload *payload, + enum set_config_status *operation_result) +{ + return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx, + link->link_index, (void *)payload, + (void *)operation_result); +} + void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) { /* TODO: something */ @@ -751,3 +804,17 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) &new_downspread.raw, sizeof(new_downspread)); } + +#if defined(CONFIG_DRM_AMD_DC_DCN) +void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) +{ + // FPGA programming for this clock in diags framework that + // needs to go through dm layer, therefore leave dummy interace here +} + + +void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) +{ + /* TODO: add peridic detection implementation */ +} +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1bcba6943fd7..cc34a35d0bcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,6 +36,8 @@ #include "dm_helpers.h" #include "dc_link_ddc.h" +#include "ddc_service_types.h" +#include "dpcd_defs.h" #include "i2caux_interface.h" #include "dmub_cmd.h" @@ -64,6 +66,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0; payload.write = (msg->request & DP_AUX_I2C_READ) == 0; payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; + payload.write_status_update = + (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, @@ -155,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { }; #if defined(CONFIG_DRM_AMD_DC_DCN) +static bool needs_dsc_aux_workaround(struct dc_link *link) +{ + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && + link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) + return true; + + return false; +} + static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; @@ -164,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto u8 *dsc_branch_dec_caps = NULL; aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); -#if defined(CONFIG_HP_HOOK_WORKAROUND) + /* * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs * because it only check the dsc/fec caps of the "port variable" and not the dock @@ -174,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux * */ - - if (!aconnector->dsc_aux && !port->parent->port_parent) + if (!aconnector->dsc_aux && !port->parent->port_parent && + 
needs_dsc_aux_workaround(aconnector->dc_link)) aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; -#endif + if (!aconnector->dsc_aux) return false; @@ -518,12 +532,7 @@ struct dsc_mst_fairness_params { uint32_t num_slices_h; uint32_t num_slices_v; uint32_t bpp_overwrite; -}; - -struct dsc_mst_fairness_vars { - int pbn; - bool dsc_enabled; - int bpp_x16; + struct amdgpu_dm_connector *aconnector; }; static int kbps_to_peak_pbn(int kbps) @@ -537,17 +546,18 @@ static int kbps_to_peak_pbn(int kbps) static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; for (i = 0; i < count; i++) { memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); - if (vars[i].dsc_enabled && dc_dsc_compute_config( + if (vars[i + k].dsc_enabled && dc_dsc_compute_config( params[i].sink->ctx->dc->res_pool->dscs[0], ¶ms[i].sink->dsc_caps.dsc_dec_caps, params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, - 0, + params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, 0, params[i].timing, ¶ms[i].timing->dsc_cfg)) { @@ -556,7 +566,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p if (params[i].bpp_overwrite) params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; else - params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16; + params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; if (params[i].num_slices_h) params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; @@ -579,7 +589,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) param.sink->ctx->dc->res_pool->dscs[0], ¶m.sink->dsc_caps.dsc_dec_caps, param.sink->ctx->dc->debug.dsc_min_slice_height_override, - 0, + param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, (int) kbps, param.timing, &dsc_config); return dsc_config.bits_per_pixel; @@ -589,7 +599,8 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool bpp_increased[MAX_PIPES]; @@ -604,8 +615,9 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled) { - initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn; + if (vars[i + k].dsc_enabled) { + initial_slack[i] = + kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -632,7 +644,7 @@ static void increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot); fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; @@ -685,7 +697,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, - int count) + int count, + int k) { int i; bool tried[MAX_PIPES]; @@ -695,8 +708,8 @@ static void try_disable_dsc(struct drm_atomic_state *state, int remaining_to_try = 0; for (i = 0; i < count; i++) { - if (vars[i].dsc_enabled - && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16 + if (vars[i + k].dsc_enabled + && vars[i + k].bpp_x16 == 
params[i].bw_range.max_target_bpp_x16 && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; tried[i] = false; @@ -750,12 +763,13 @@ static void try_disable_dsc(struct drm_atomic_state *state, static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, - struct dc_link *dc_link) + struct dc_link *dc_link, + struct dsc_mst_fairness_vars *vars, + int *link_vars_start_index) { - int i; + int i, k; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; - struct dsc_mst_fairness_vars vars[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; int count = 0; bool debugfs_overwrite = false; @@ -771,11 +785,18 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (stream->link != dc_link) continue; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + continue; + + if (!aconnector->port) + continue; + stream->timing.flags.DSC = 0; params[count].timing = &stream->timing; params[count].sink = stream->sink; - aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + params[count].aconnector = aconnector; params[count].port = aconnector->port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) @@ -796,43 +817,55 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, count++; } + + if (count == 0) { + ASSERT(0); + return true; + } + + /* k is start index of vars for current phy link used by mst hub */ + k = *link_vars_start_index; + /* set vars start index for next mst hub phy link */ + *link_vars_start_index += count; + /* Try no compression */ for (i = 0; i < count; i++) { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].aconnector = params[i].aconnector; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); - vars[i].dsc_enabled = true; - vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); + vars[i + k].dsc_enabled = true; + vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return false; } else { - vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); - vars[i].dsc_enabled = false; - vars[i].bpp_x16 = 0; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; if (drm_dp_atomic_find_vcpi_slots(state, params[i].port->mgr, params[i].port, - vars[i].pbn, + vars[i + k].pbn, dm_mst_get_pbn_divider(dc_link)) < 0) return 
false; } @@ -841,22 +874,85 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return false; /* Optimize degree of compression */ - increase_dsc_bpp(state, dc_link, params, vars, count); + increase_dsc_bpp(state, dc_link, params, vars, count, k); - try_disable_dsc(state, dc_link, params, vars, count); + try_disable_dsc(state, dc_link, params, vars, count, k); - set_dsc_configs_from_fairness_vars(params, vars, count); + set_dsc_configs_from_fairness_vars(params, vars, count, k); return true; } +static bool is_dsc_need_re_compute( + struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link) +{ + int i; + bool is_dsc_need_re_compute = false; + + /* only check phy used by mst branch */ + if (dc_link->type != dc_connection_mst_branch) + return false; + + /* check if there is mode change in new request */ + for (i = 0; i < dc_state->stream_count; i++) { + struct amdgpu_dm_connector *aconnector; + struct dc_stream_state *stream; + struct drm_crtc_state *new_crtc_state; + struct drm_connector_state *new_conn_state; + + stream = dc_state->streams[i]; + + if (!stream) + continue; + + /* check if stream using the same link for mst */ + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; + if (!aconnector) + continue; + + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + + if (!new_conn_state) + continue; + + if (IS_ERR(new_conn_state)) + continue; + + if (!new_conn_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + + if (!new_crtc_state) + continue; + + if (IS_ERR(new_crtc_state)) + continue; + + if (new_crtc_state->enable && new_crtc_state->active) { + if (new_crtc_state->mode_changed || new_crtc_state->active_changed || + new_crtc_state->connectors_changed) + is_dsc_need_re_compute = true; + } + } + + return is_dsc_need_re_compute; +} + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state) + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; bool computed_streams[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + int link_vars_start_index = 0; for (i = 0; i < dc_state->stream_count; i++) computed_streams[i] = false; @@ -881,8 +977,12 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) return false; + if (!is_dsc_need_re_compute(state, dc_state, stream->link)) + continue; + mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, + vars, &link_vars_start_index)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index b38bd68121ce..900d3f7a8498 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -39,8 +39,17 @@ void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); #if defined(CONFIG_DRM_AMD_DC_DCN) + +struct dsc_mst_fairness_vars { + int pbn; + bool dsc_enabled; + int bpp_x16; + struct amdgpu_dm_connector *aconnector; +}; + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state); + 
struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars); #endif #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 70a554f1e725..c022e56f9459 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -107,6 +107,8 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ // Init fail safe of 2 frames static unsigned int num_frames_static = 2; + unsigned int power_opt = 0; + bool psr_enable = true; DRM_DEBUG_DRIVER("Enabling psr...\n"); @@ -133,7 +135,9 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) &stream, 1, ¶ms); - return dc_link_set_psr_allow_active(link, true, false, false); + power_opt |= psr_power_opt_z10_static_screen; + + return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); } /* @@ -144,10 +148,12 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) */ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) { + unsigned int power_opt = 0; + bool psr_enable = false; DRM_DEBUG_DRIVER("Disabling psr...\n"); - return dc_link_set_psr_allow_active(stream->link, false, true, false); + return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt); } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index c9f47d167472..ab0c6d191038 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -52,7 +52,7 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth); * This function tells if the code is already under FPU protection or not. A * function that works as an API for a set of FPU operations can use this * function for checking if the caller invoked it after DC_FP_START(). For - * example, take a look at dcn2x.c file. + * example, take a look at dcn20_fpu.c file. 
  */
 inline void dc_assert_fp_enabled(void)
 {
@@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void)
 	depth = *pcpu;
 	put_cpu_ptr(&fpu_recursion_depth);
 
-	ASSERT(depth > 1);
+	ASSERT(depth >= 1);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 943fcb164876..b1f0d6260226 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -30,6 +30,7 @@ DC_LIBS += dcn20
 DC_LIBS += dsc
 DC_LIBS += dcn10 dml
 DC_LIBS += dcn21
+DC_LIBS += dcn201
 DC_LIBS += dcn30
 DC_LIBS += dcn301
 DC_LIBS += dcn302
@@ -58,7 +59,7 @@ include $(AMD_DC)
 
 DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
 dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
-dc_link_enc_cfg.o dc_link_dpcd.o
+dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
 
 ifdef CONFIG_DRM_AMD_DC_DCN
 DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 6dbde74c1e06..1e385d55e7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -99,6 +99,10 @@ static enum bp_result get_firmware_info_v3_2(
 	struct bios_parser *bp,
 	struct dc_firmware_info *info);
 
+static enum bp_result get_firmware_info_v3_4(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info);
+
 static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
 	struct atom_display_object_path_v2 *object);
 
@@ -1426,8 +1430,10 @@ static enum bp_result bios_parser_get_firmware_info(
 			break;
 		case 2:
 		case 3:
-		case 4:
 			result = get_firmware_info_v3_2(bp, info);
+			break;
+		case 4:
+			result = get_firmware_info_v3_4(bp, info);
 			break;
 		default:
 			break;
@@ -1575,6 +1581,88 @@ static enum bp_result get_firmware_info_v3_2(
 	return BP_RESULT_OK;
 }
 
+static enum bp_result get_firmware_info_v3_4(
+	struct bios_parser *bp,
+	struct dc_firmware_info *info)
+{
+	struct atom_firmware_info_v3_4 *firmware_info;
+	struct atom_common_table_header *header;
+	struct atom_data_revision revision;
+	struct atom_display_controller_info_v4_1 *dce_info_v4_1 = NULL;
+	struct atom_display_controller_info_v4_4 *dce_info_v4_4 = NULL;
+	if (!info)
+		return BP_RESULT_BADINPUT;
+
+	firmware_info = GET_IMAGE(struct atom_firmware_info_v3_4,
+			DATA_TABLES(firmwareinfo));
+
+	if (!firmware_info)
+		return BP_RESULT_BADBIOSTABLE;
+
+	memset(info, 0, sizeof(*info));
+
+	header = GET_IMAGE(struct atom_common_table_header,
+			DATA_TABLES(dce_info));
+
+	get_atom_data_table_revision(header, &revision);
+
+	switch (revision.major) {
+	case 4:
+		switch (revision.minor) {
+		case 4:
+			dce_info_v4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
+					DATA_TABLES(dce_info));
+
+			if (!dce_info_v4_4)
+				return BP_RESULT_BADBIOSTABLE;
+
+			/* 100MHz expected */
+			info->pll_info.crystal_frequency = dce_info_v4_4->dce_refclk_10khz * 10;
+			info->dp_phy_ref_clk = dce_info_v4_4->dpphy_refclk_10khz * 10;
+			/* 50MHz expected */
+			info->i2c_engine_ref_clk = dce_info_v4_4->i2c_engine_refclk_10khz * 10;
+
+			/* Get SMU Display PLL VCO Frequency in KHz*/
+			info->smu_gpu_pll_output_freq = dce_info_v4_4->dispclk_pll_vco_freq * 10;
+			break;
+
+		default:
+			/* should not come here, keep as backup, as was before */
+			dce_info_v4_1 = GET_IMAGE(struct atom_display_controller_info_v4_1,
+					DATA_TABLES(dce_info));
+
+			if (!dce_info_v4_1)
+				return BP_RESULT_BADBIOSTABLE;
+
+			info->pll_info.crystal_frequency = dce_info_v4_1->dce_refclk_10khz * 10;
+			info->dp_phy_ref_clk = dce_info_v4_1->dpphy_refclk_10khz * 10;
+			info->i2c_engine_ref_clk = dce_info_v4_1->i2c_engine_refclk_10khz * 10;
+			break;
+		}
+		break;
+
+	default:
+		ASSERT(0);
+		break;
+	}
+
+	header = GET_IMAGE(struct atom_common_table_header,
+			DATA_TABLES(smu_info));
+	get_atom_data_table_revision(header, &revision);
+
+	// We need to convert from 10KHz units into KHz units.
+	info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+	if (firmware_info->board_i2c_feature_id == 0x2) {
+		info->oem_i2c_present = true;
+		info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+	} else {
+		info->oem_i2c_present = false;
+	}
+
+	return BP_RESULT_OK;
+}
+
 static enum bp_result bios_parser_get_encoder_cap_info(
 	struct dc_bios *dcb,
 	struct graphics_object_id object_id,
@@ -1604,6 +1692,16 @@ static enum bp_result bios_parser_get_encoder_cap_info(
 			ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
 	info->HDMI_6GB_EN = (record->encodercaps &
 			ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	info->IS_DP2_CAPABLE = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0;
+	info->DP_UHBR10_EN = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_UHBR10_EN) ? 1 : 0;
+	info->DP_UHBR13_5_EN = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0;
+	info->DP_UHBR20_EN = (record->encodercaps &
+			ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
+#endif
 	info->DP_IS_USB_C = (record->encodercaps &
 			ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
 
@@ -2223,6 +2321,8 @@ static enum bp_result get_integrated_info_v2_2(
 	info->ext_disp_conn_info.checksum =
 		info_v2_2->extdispconninfo.checksum;
+	info->ext_disp_conn_info.fixdpvoltageswing =
+		info_v2_2->extdispconninfo.fixdpvoltageswing;
 
 	info->edp1_info.edp_backlight_pwm_hz =
 	le16_to_cpu(info_v2_2->edp1_info.edp_backlight_pwm_hz);
@@ -2895,7 +2995,7 @@ static bool bios_parser2_construct(
 		&bp->object_info_tbl.revision);
 
 	if (bp->object_info_tbl.revision.major == 1
-		&& bp->object_info_tbl.revision.minor >= 4) {
+		&& bp->object_info_tbl.revision.minor == 4) {
 		struct display_object_info_table_v1_4 *tbl_v1_4;
 
 		tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
@@ -2904,8 +3004,10 @@ static bool bios_parser2_construct(
 			return false;
 
 		bp->object_info_tbl.v1_4 = tbl_v1_4;
-	} else
+	} else {
+		ASSERT(0);
 		return false;
+	}
 
 	dal_firmware_parser_init_cmd_tbl(bp);
 	dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index f1f672a997d7..9afa5eb2e6d3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -44,9 +44,7 @@
 	bp->base.ctx->logger
 
 #define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
-	(((char *)(&((\
-		struct atom_master_list_of_##MasterOrData##_functions_v2_1 *)0)\
-		->FieldName)-(char *)0)/sizeof(uint16_t))
+	(offsetof(struct atom_master_list_of_##MasterOrData##_functions_v2_1, FieldName) / sizeof(uint16_t))
 
 #define EXEC_BIOS_CMD_TABLE(fname, params)\
 	(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
@@ -340,6 +338,13 @@ static enum bp_result transmitter_control_v1_7(
 	const struct command_table_helper *cmd = bp->cmd_helper;
 	struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0};
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0;
+
+	if (dc_is_dp_signal(cntl->signal))
+		hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0;
+#endif
+
 	dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter);
 	dig_v1_7.action = (uint8_t)cntl->action;
 
@@ -353,6 +358,9 @@ static enum bp_result transmitter_control_v1_7(
 	dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
 	dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
 	dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	dig_v1_7.HPO_instance = hpo_instance;
+#endif
 	dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10;
 
 	if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index cb3fd44cb1ed..eedc553f340e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -70,6 +70,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
 	case DCN_VERSION_1_01:
 	case DCN_VERSION_2_0:
 	case DCN_VERSION_2_1:
+	case DCN_VERSION_2_01:
 	case DCN_VERSION_3_0:
 	case DCN_VERSION_3_01:
 	case DCN_VERSION_3_02:
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 0e18df1283b6..c8b0a2f05b4d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -459,9 +459,9 @@ static void dcn_bw_calc_rq_dlg_ttu(
 	struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
 	struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
 	struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
-	struct _vcs_dpi_display_rq_params_st rq_param = {0};
-	struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
-	struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
+	struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param;
+	struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param;
+	struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input;
 	float total_active_bw = 0;
 	float total_prefetch_bw = 0;
 	int total_flip_bytes = 0;
@@ -470,45 +470,48 @@ static void dcn_bw_calc_rq_dlg_ttu(
 	memset(dlg_regs, 0, sizeof(*dlg_regs));
 	memset(ttu_regs, 0, sizeof(*ttu_regs));
 	memset(rq_regs, 0, sizeof(*rq_regs));
+	memset(rq_param, 0, sizeof(*rq_param));
+	memset(dlg_sys_param, 0, sizeof(*dlg_sys_param));
+	memset(input, 0, sizeof(*input));
 
 	for (i = 0; i < number_of_planes; i++) {
 		total_active_bw += v->read_bandwidth[i];
 		total_prefetch_bw += v->prefetch_bandwidth[i];
 		total_flip_bytes += v->total_immediate_flip_bytes[i];
 	}
-	dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
-	if (dlg_sys_param.total_flip_bw < 0.0)
-		dlg_sys_param.total_flip_bw = 0;
-
-	dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
-	dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
-	dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
-	dlg_sys_param.t_extra_us = v->urgent_extra_latency;
-	dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
-	dlg_sys_param.total_flip_bytes = total_flip_bytes;
-
-	pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
-	input.clks_cfg.dcfclk_mhz = v->dcfclk;
-	input.clks_cfg.dispclk_mhz = v->dispclk;
-	input.clks_cfg.dppclk_mhz = v->dppclk;
-	input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
-	input.clks_cfg.socclk_mhz = v->socclk;
-	input.clks_cfg.voltage = v->voltage_level;
+	dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
+	if (dlg_sys_param->total_flip_bw < 0.0)
+		dlg_sys_param->total_flip_bw = 0;
+
+	dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark;
+	dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
+	dlg_sys_param->t_urg_wm_us = v->urgent_watermark;
+	dlg_sys_param->t_extra_us = v->urgent_extra_latency;
+	dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
+	dlg_sys_param->total_flip_bytes = total_flip_bytes;
+
+	pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe);
+	input->clks_cfg.dcfclk_mhz = v->dcfclk;
+	input->clks_cfg.dispclk_mhz = v->dispclk;
+	input->clks_cfg.dppclk_mhz = v->dppclk;
+	input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+	input->clks_cfg.socclk_mhz = v->socclk;
+	input->clks_cfg.voltage = v->voltage_level;
 
 	// dc->dml.logger = pool->base.logger;
-	input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
-	input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
+	input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
+	input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
 	//input[in_idx].dout.output_standard;
 
 	/*todo: soc->sr_enter_plus_exit_time??*/
-	dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
+	dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
 
-	dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
+	dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
 	dml1_extract_rq_regs(dml, rq_regs, rq_param);
 	dml1_rq_dlg_get_dlg_params(
 			dml,
 			dlg_regs,
 			ttu_regs,
-			rq_param.dlg,
+			&rq_param->dlg,
 			dlg_sys_param,
 			input,
 			true,
@@ -760,7 +763,7 @@ unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw
 	return 4;
 }
 
-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
 		struct dc *dc,
 		struct dc_state *context,
 		bool fast_validate)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 7fa0b007a7ea..6bd73e49a6d2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -94,6 +94,15 @@ AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DC
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
 
 ###############################################################################
+# DCN201
+###############################################################################
+CLK_MGR_DCN201 = dcn201_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCN201 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn201/,$(CLK_MGR_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN201)
+
+###############################################################################
 # DCN21
 ###############################################################################
 CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index bb31541f8072..9200c8ce02ba 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -39,6 +39,7 @@
 #include "dcn10/rv2_clk_mgr.h"
 #include "dcn20/dcn20_clk_mgr.h"
 #include "dcn21/rn_clk_mgr.h"
+#include "dcn201/dcn201_clk_mgr.h"
 #include "dcn30/dcn30_clk_mgr.h"
 #include "dcn301/vg_clk_mgr.h"
 #include "dcn31/dcn31_clk_mgr.h"
@@ -99,11 +100,13 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
 
 	if (edp_num) {
 		for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
+			bool allow_active = false;
+
 			edp_link = edp_links[panel_inst];
 			if (!edp_link->psr_settings.psr_feature_enabled)
 				continue;
 			clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
-			dc_link_set_psr_allow_active(edp_link, false, false, false);
+			dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
 		}
 	}
 
@@ -123,7 +126,7 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
 			if (!edp_link->psr_settings.psr_feature_enabled)
 				continue;
 			dc_link_set_psr_allow_active(edp_link,
-					clk_mgr->psr_allow_active_cache, false, false);
+					&clk_mgr->psr_allow_active_cache, false, false, NULL);
 		}
 	}
 
@@ -256,6 +259,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 			dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 			return &clk_mgr->base;
 		}
+		if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+			dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+			return &clk_mgr->base;
+		}
 		dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 		return &clk_mgr->base;
 	}
@@ -278,13 +285,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 			BREAK_TO_DEBUGGER();
 			return NULL;
 		}
-		if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) {
-			/* TODO: to add DCN31 clk_mgr support, once CLK IP header files are available,
-			 * for now use DCN3.0 clk mgr.
-			 */
-			dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-			return &clk_mgr->base.base;
-		}
+
+		dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 		return &clk_mgr->base.base;
 	}
 #endif
@@ -306,8 +308,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
 	case FAMILY_NV:
 		if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
 			dcn3_clk_mgr_destroy(clk_mgr);
-		}
-		if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+		} else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
 			dcn3_clk_mgr_destroy(clk_mgr);
 		}
 		if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
@@ -321,7 +322,6 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
 		break;
 
 	case FAMILY_YELLOW_CARP:
-		if (ASICREV_IS_YELLOW_CARP(clk_mgr_base->ctx->asic_id.hw_internal_rev))
 			dcn31_clk_mgr_destroy(clk_mgr);
 		break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d01aa9f15a6..2108bff49d4e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -38,6 +38,8 @@
 #include "clk/clk_11_0_0_offset.h"
 #include "clk/clk_11_0_0_sh_mask.h"
 
+#include "irq/dcn20/irq_service_dcn20.h"
+
 #undef FN
 #define FN(reg_name, field_name) \
 	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
@@ -221,6 +223,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	bool force_reset = false;
 	bool p_state_change_support;
 	int total_plane_count;
+	int irq_src;
+	uint32_t hpd_state;
 
 	if (dc->work_arounds.skip_clock_update)
 		return;
@@ -238,7 +242,13 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (dc->res_pool->pp_smu)
 		pp_smu = &dc->res_pool->pp_smu->nv_funcs;
 
-	if (display_count == 0)
+	for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
+		hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
+		if (hpd_state)
+			break;
+	}
+
+	if (display_count == 0 && !hpd_state)
 		enter_display_off = true;
 
 	if (enter_display_off == safe_to_lower) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
new file mode 100644
index 000000000000..db9950244c7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+#include "dcn201_clk_mgr.h"
+#include "dcn20/dcn20_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+#include "dm_helpers.h"
+#include "dm_services.h"
+
+#include "cyan_skillfish_ip_offset.h"
+#include "dcn/dcn_2_0_3_offset.h"
+#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "clk/clk_11_0_1_offset.h"
+#include "clk/clk_11_0_1_sh_mask.h"
+
+#define REG(reg) \
+	(clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+		.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+					mm ## reg_name
+
+#define CLK_BASE_INNER(seg) \
+	CLK_BASE__INST0_SEG ## seg
+
+#undef FN
+#define FN(reg_name, field_name) \
+	clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+
+#define CTX \
+	clk_mgr->base.ctx
+#define DC_LOGGER \
+	clk_mgr->base.ctx->logger
+
+static const struct clk_mgr_registers clk_mgr_regs = {
+	CLK_COMMON_REG_LIST_DCN_201()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift = {
+	CLK_COMMON_MASK_SH_LIST_DCN201_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask = {
+	CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK)
+};
+
+void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr,
+		struct dc_state *context,
+		bool safe_to_lower)
+{
+	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+
+	bool update_dppclk = false;
+	bool update_dispclk = false;
+
+	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
+		clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+		update_dppclk = true;
+	}
+
+	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+		clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+		update_dispclk = true;
+	}
+
+	if (update_dppclk || update_dispclk) {
+		struct bp_set_dce_clock_parameters dce_clk_params;
+		struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+
+		if (update_dispclk) {
+			memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+			dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz;
+			dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+			dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+			bp->funcs->set_dce_clock(bp, &dce_clk_params);
+		}
+		/* currently there is no DCECLOCK_TYPE_DPPCLK type defined in VBIOS interface.
+		 * vbios program DPPCLK to the same DispCLK limitation
+		 */
+	}
+}
+
+static void dcn201_init_clocks(struct clk_mgr *clk_mgr)
+{
+	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+	clk_mgr->clks.p_state_change_support = true;
+	clk_mgr->clks.prev_p_state_change_support = true;
+	clk_mgr->clks.max_supported_dppclk_khz = 1200000;
+	clk_mgr->clks.max_supported_dispclk_khz = 1200000;
+}
+
+static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
+	struct dc_state *context,
+	bool safe_to_lower)
+{
+	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+	struct dc *dc = clk_mgr_base->ctx->dc;
+	int display_count;
+	bool update_dppclk = false;
+	bool update_dispclk = false;
+	bool enter_display_off = false;
+	bool dpp_clock_lowered = false;
+	bool force_reset = false;
+	bool p_state_change_support;
+	int total_plane_count;
+
+	if (dc->work_arounds.skip_clock_update)
+		return;
+
+	if (clk_mgr_base->clks.dispclk_khz == 0 ||
+		dc->debug.force_clock_mode & 0x1) {
+		force_reset = true;
+
+		dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+	}
+
+	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+
+	if (display_count == 0)
+		enter_display_off = true;
+
+	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
+		clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+	if (dc->debug.force_min_dcfclk_mhz > 0)
+		new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+				new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz))
+		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+
+	if (should_set_clock(safe_to_lower,
+			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz))
+		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+
+	if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
+		clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
+
+	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+	p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+		clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+		clk_mgr_base->clks.p_state_change_support = p_state_change_support;
+	}
+
+	if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz))
+		clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
+
+	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+			dpp_clock_lowered = true;
+		clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
+
+		update_dppclk = true;
+	}
+
+	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+		update_dispclk = true;
+	}
+
+	if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+		if (dpp_clock_lowered) {
+			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+			dcn20_update_clocks_update_dentist(clk_mgr, context);
+		} else {
+			if (update_dppclk || update_dispclk)
+				dcn20_update_clocks_update_dentist(clk_mgr, context);
+			if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+				dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+		}
+	}
+}
+
+struct clk_mgr_funcs dcn201_funcs = {
+	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+	.update_clocks = dcn201_update_clocks,
+	.init_clocks = dcn201_init_clocks,
+	.get_clock = dcn2_get_clock,
+};
+
+void dcn201_clk_mgr_construct(struct dc_context *ctx,
+		struct clk_mgr_internal *clk_mgr,
+		struct pp_smu_funcs *pp_smu,
+		struct dccg *dccg)
+{
+	struct dc_debug_options *debug = &ctx->dc->debug;
+	struct dc_bios *bp = ctx->dc_bios;
+	clk_mgr->base.ctx = ctx;
+	clk_mgr->base.funcs = &dcn201_funcs;
+	clk_mgr->regs = &clk_mgr_regs;
+	clk_mgr->clk_mgr_shift = &clk_mgr_shift;
+	clk_mgr->clk_mgr_mask = &clk_mgr_mask;
+
+	clk_mgr->dccg = dccg;
+
+	clk_mgr->dfs_bypass_disp_clk = 0;
+
+	clk_mgr->dprefclk_ss_percentage = 0;
+	clk_mgr->dprefclk_ss_divider = 1000;
+	clk_mgr->ss_on_dprefclk = false;
+
+	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+		dcn201_funcs.update_clocks = dcn2_update_clocks_fpga;
+		clk_mgr->base.dprefclk_khz = 600000;
+		clk_mgr->base.dentist_vco_freq_khz = 3000000;
+	} else {
+		clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT);
+		clk_mgr->base.dprefclk_khz *= 100;
+
+		if (clk_mgr->base.dprefclk_khz == 0)
+			clk_mgr->base.dprefclk_khz = 600000;
+
+		REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz);
+		clk_mgr->base.dentist_vco_freq_khz *= 100000;
+
+		if (clk_mgr->base.dentist_vco_freq_khz == 0)
+			clk_mgr->base.dentist_vco_freq_khz = 3000000;
+	}
+
+	if (!debug->disable_dfs_bypass && bp->integrated_info)
+		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+			clk_mgr->dfs_bypass_enabled = true;
+
+	dce_clock_read_ss_info(clk_mgr);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h
new file mode 100644
index 000000000000..ae463baaff47
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN201_CLK_MGR_H__
+#define __DCN201_CLK_MGR_H__
+
+void dcn201_clk_mgr_construct(struct dc_context *ctx,
+		struct clk_mgr_internal *clk_mgr,
+		struct pp_smu_funcs *pp_smu,
+		struct dccg *dccg);
+
+#endif //__DCN201_CLK_MGR_H__
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 6185f9475fa2..ac2d4c4f04e4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -42,6 +42,7 @@
 #include "clk/clk_10_0_2_sh_mask.h"
 #include "renoir_ip_offset.h"
 
+#include "irq/dcn21/irq_service_dcn21.h"
 
 /* Constants */
 
@@ -66,11 +67,9 @@ int rn_get_active_display_cnt_wa(
 	for (i = 0; i < context->stream_count; i++) {
 		const struct dc_stream_state *stream = context->streams[i];
 
-		/* Extend the WA to DP for Linux*/
 		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
 				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
-				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
-				stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
 			tmds_present = true;
 	}
 
@@ -131,9 +130,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
 	struct dc *dc = clk_mgr_base->ctx->dc;
 	int display_count;
+	int irq_src;
 	bool update_dppclk = false;
 	bool update_dispclk = false;
 	bool dpp_clock_lowered = false;
+	uint32_t hpd_state;
 
 	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
 
@@ -149,8 +150,15 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
 
 		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
 			display_count = rn_get_active_display_cnt_wa(dc, context);
+
+			for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) {
+				hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
+				if (hpd_state)
+					break;
+			}
+
 			/* if we can go lower, go lower */
-			if (display_count == 0) {
+			if (display_count == 0 && !hpd_state) {
 				rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
 				/* update power state */
 				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 7046da14bb2a..3eee32faa208 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -582,8 +582,8 @@ static struct wm_table lpddr5_wm_table = {
 			.wm_inst = WM_A,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 5.32,
-			.sr_enter_plus_exit_time_us = 6.38,
+			.sr_exit_time_us = 7.95,
+			.sr_enter_plus_exit_time_us = 9,
 			.valid = true,
 		},
 		{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 4a4894e9d9c9..a13ff1783b9b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -66,7 +66,7 @@
 #define TO_CLK_MGR_DCN31(clk_mgr)\
 	container_of(clk_mgr, struct clk_mgr_dcn31, base)
 
-int dcn31_get_active_display_cnt_wa(
+static int dcn31_get_active_display_cnt_wa(
 		struct dc *dc,
 		struct dc_state *context)
 {
@@ -87,7 +87,7 @@ int dcn31_get_active_display_cnt_wa(
 		const struct dc_link *link = dc->links[i];
 
 		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
-		if (link->link_enc->funcs->is_dig_enabled &&
+		if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
 		    link->link_enc->funcs->is_dig_enabled(link->link_enc))
 			display_count++;
 	}
@@ -118,7 +118,7 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
 	}
 }
 
-static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 			struct dc_state *context,
 			bool safe_to_lower)
 {
@@ -142,6 +142,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 		if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
 				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
 			dcn31_smu_set_Z9_support(clk_mgr, true);
+			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
 			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
 		}
 
@@ -166,6 +167,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 		if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
 				new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
 			dcn31_smu_set_Z9_support(clk_mgr, false);
+			dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
 			clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
 		}
 
@@ -217,14 +219,17 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
 		update_dispclk = true;
 	}
 
-	/* TODO: add back DTO programming when DPPCLK restore is fixed in FSDL*/
 	if (dpp_clock_lowered) {
 		// increase per DPP DTO before lowering global dppclk
+		dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 		dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
 	} else {
 		// increase global DPPCLK before lowering per DPP DTO
 		if (update_dppclk || update_dispclk)
 			dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+		// always update dtos unless clock is lowered and not safe to lower
+		if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+			dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
 	}
 
 	// notify DMCUB of latest clocks
@@ -279,7 +284,7 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 	dcn31_smu_enable_pme_wa(clk_mgr);
 }
 
-static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
+void dcn31_init_clocks(struct clk_mgr *clk_mgr)
 {
 	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
 	// Assumption is that boot state always supports pstate
@@ -289,7 +294,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
 	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
 }
 
-static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
 		struct dc_clocks *b)
 {
 	if (a->dispclk_khz != b->dispclk_khz)
@@ -366,32 +371,32 @@ static struct wm_table lpddr5_wm_table = {
 			.wm_inst = WM_A,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 5.32,
-			.sr_enter_plus_exit_time_us = 6.38,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_B,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 9.82,
-			.sr_enter_plus_exit_time_us = 11.196,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_C,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 9.89,
-			.sr_enter_plus_exit_time_us = 11.24,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 		{
 			.wm_inst = WM_D,
 			.wm_type = WM_TYPE_PSTATE_CHG,
 			.pstate_latency_us = 11.65333,
-			.sr_exit_time_us = 9.748,
-			.sr_enter_plus_exit_time_us = 11.102,
+			.sr_exit_time_us = 11.5,
+			.sr_enter_plus_exit_time_us = 14.5,
 			.valid = true,
 		},
 	}
@@ -518,14 +523,21 @@ static unsigned int find_clk_for_voltage(
 		unsigned int voltage)
 {
 	int i;
+	int max_voltage = 0;
+	int clock = 0;
 
 	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
-		if (clock_table->SocVoltage[i] == voltage)
+		if (clock_table->SocVoltage[i] == voltage) {
 			return clocks[i];
+		} else if (clock_table->SocVoltage[i] >= max_voltage &&
+				clock_table->SocVoltage[i] < voltage) {
+			max_voltage = clock_table->SocVoltage[i];
+			clock = clocks[i];
+		}
 	}
 
-	ASSERT(0);
-	return 0;
+	ASSERT(clock);
+	return clock;
 }
 
 void dcn31_clk_mgr_helper_populate_bw_params(
@@ -640,7 +652,7 @@ void dcn31_clk_mgr_construct(
 			sizeof(struct dcn31_watermarks),
 			&clk_mgr->smu_wm_set.mc_address.quad_part);
 
-	if (clk_mgr->smu_wm_set.wm_set == 0) {
+	if (!clk_mgr->smu_wm_set.wm_set) {
 		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
 		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
index f8f100535526..961b10a49486 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
@@ -39,6 +39,13 @@ struct clk_mgr_dcn31 {
 	struct dcn31_smu_watermark_set smu_wm_set;
 };
 
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+		struct dc_clocks *b);
+void dcn31_init_clocks(struct clk_mgr *clk_mgr);
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+			struct dc_state *context,
+			bool safe_to_lower);
+
 void dcn31_clk_mgr_construct(struct dc_context *ctx,
 		struct clk_mgr_dcn31 *clk_mgr,
 		struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c798c65d4276..17b7408d84b7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -221,14 +221,33 @@ static bool create_links(
 		link = link_create(&link_init_params);
 
 		if (link) {
-			dc->links[dc->link_count] = link;
-			link->dc = dc;
-			++dc->link_count;
+				dc->links[dc->link_count] = link;
+				link->dc = dc;
+				++dc->link_count;
 		}
 	}
 
 	DC_LOG_DC("BIOS object table - end");
 
+	/* Create a link for each usb4 dpia port */
+	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
+		struct link_init_data link_init_params = {0};
+		struct dc_link *link;
+
+		link_init_params.ctx = dc->ctx;
+		link_init_params.connector_index = i;
+		link_init_params.link_index = dc->link_count;
+		link_init_params.dc = dc;
+		link_init_params.is_dpia_link = true;
+
+		link = link_create(&link_init_params);
+		if (link) {
+			dc->links[dc->link_count] = link;
+			link->dc = dc;
+			++dc->link_count;
+		}
+	}
+
 	for (i = 0; i < num_virtual_links; i++) {
 		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
 		struct encoder_init_data enc_init = {0};
@@ -255,6 +274,24 @@ static bool create_links(
 			goto failed_alloc;
 		}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+				dc->caps.dp_hpo &&
+				link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
+			/* FPGA case - Allocate HPO DP link encoder */
+			if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
+				link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];
+
+				if (link->hpo_dp_link_enc == NULL) {
+					BREAK_TO_DEBUGGER();
+					goto failed_alloc;
+				}
+				link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+				link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+			}
+		}
+#endif
+
 		link->link_status.dpcd_caps = &link->dpcd_caps;
 
 		enc_init.ctx = dc->ctx;
@@ -276,6 +313,75 @@ failed_alloc:
 	return false;
 }
 
+/* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction. This can happen if the
+ * number of physical connectors is less than the number of DIGs.
+ */
+static bool create_link_encoders(struct dc *dc)
+{
+	bool res = true;
+	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+	int i;
+
+	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+	 * link encoders and physical display endpoints and does not require
+	 * additional link encoder objects.
+	 */
+	if (num_usb4_dpia == 0)
+		return res;
+
+	/* Create as many link encoder objects as the platform supports. DPIA
+	 * endpoints can be programmably mapped to any DIG.
+	 */
+	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
+		for (i = 0; i < num_dig_link_enc; i++) {
+			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
+				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
+						(enum engine_id)(ENGINE_ID_DIGA + i));
+				if (link_enc) {
+					dc->res_pool->link_encoders[i] = link_enc;
+					dc->res_pool->dig_link_enc_count++;
+				} else {
+					res = false;
+				}
+			}
+		}
+	}
+
+	return res;
+}
+
+/* Destroy any additional DIG link encoder objects created by
+ * create_link_encoders().
+ * NB: Must only be called after destroy_links().
+ */
+static void destroy_link_encoders(struct dc *dc)
+{
+	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+	int i;
+
+	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+	 * link encoders and physical display endpoints and does not require
+	 * additional link encoder objects.
+	 */
+	if (num_usb4_dpia == 0)
+		return;
+
+	for (i = 0; i < num_dig_link_enc; i++) {
+		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+		if (link_enc) {
+			link_enc->funcs->destroy(&link_enc);
+			dc->res_pool->link_encoders[i] = NULL;
+			dc->res_pool->dig_link_enc_count--;
+		}
+	}
+}
+
 static struct dc_perf_trace *dc_perf_trace_create(void)
 {
 	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
@@ -702,6 +808,10 @@ void dc_stream_set_static_screen_params(struct dc *dc,
 
 static void dc_destruct(struct dc *dc)
 {
+	// reset link encoder assignment table on destruct
+	if (dc->res_pool->funcs->link_encs_assign)
+		link_enc_cfg_init(dc, dc->current_state);
+
 	if (dc->current_state) {
 		dc_release_state(dc->current_state);
 		dc->current_state = NULL;
@@ -709,6 +819,8 @@ static void dc_destruct(struct dc *dc)
 
 	destroy_links(dc);
 
+	destroy_link_encoders(dc);
+
 	if (dc->clk_mgr) {
 		dc_destroy_clk_mgr(dc->clk_mgr);
 		dc->clk_mgr = NULL;
@@ -908,13 +1020,16 @@ static bool dc_construct(struct dc *dc,
 		goto fail;
 	}
 
-	dc_resource_state_construct(dc, dc->current_state);
-
 	if (!create_links(dc, init_params->num_virtual_links))
 		goto fail;
 
-	/* Initialise DIG link encoder resource tracking variables. */
-	link_enc_cfg_init(dc, dc->current_state);
+	/* Create additional DIG link encoder objects if fewer than the platform
+	 * supports were created during link construction.
+	 */
+	if (!create_link_encoders(dc))
+		goto fail;
+
+	dc_resource_state_construct(dc, dc->current_state);
 
 	return true;
 
@@ -971,6 +1086,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 		struct dc_stream_state *old_stream =
 				dc->current_state->res_ctx.pipe_ctx[i].stream;
 		bool should_disable = true;
+		bool pipe_split_change =
+			context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
 
 		for (j = 0; j < context->stream_count; j++) {
 			if (old_stream == context->streams[j]) {
@@ -978,6 +1095,9 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 				break;
 			}
 		}
+		if (!should_disable && pipe_split_change)
+			should_disable = true;
+
 		if (should_disable && old_stream) {
 			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
 			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
@@ -1544,7 +1664,7 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
 }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc)
+void dc_z10_restore(const struct dc *dc)
 {
 	if (dc->hwss.z10_restore)
 		dc->hwss.z10_restore(dc);
@@ -1711,6 +1831,19 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
 		dc_stream_log(dc, stream);
 	}
 
+	/*
+	 * Previous validation was perfomred with fast_validation = true and
+	 * the full DML state required for hardware programming was skipped.
+	 *
+	 * Re-validate here to calculate these parameters / watermarks.
+	 */
+	result = dc_validate_global_state(dc, context, false);
+	if (result != DC_OK) {
+		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
+			     dc_status_to_str(result), result);
+		return result;
+	}
+
 	result = dc_commit_state_no_check(dc, context);
 
 	return (result == DC_OK);
@@ -1773,6 +1906,27 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
 	return false;
 }
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
+/* Perform updates here which need to be deferred until next vupdate
+ *
+ * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
+ * but forcing lut memory to shutdown state is immediate. This causes
+ * single frame corruption as lut gets disabled mid-frame unless shutdown
+ * is deferred until after entering bypass.
+ */
+static void process_deferred_updates(struct dc *dc)
+{
+	int i = 0;
+
+	if (dc->debug.enable_mem_low_power.bits.cm) {
+		ASSERT(dc->dcn_ip->max_num_dpp);
+		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
+			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
+				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
+	}
+}
+#endif /* CONFIG_DRM_AMD_DC_DCN */
+
 void dc_post_update_surfaces_to_stream(struct dc *dc)
 {
 	int i;
@@ -1783,6 +1937,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 
 	post_surface_trace(dc);
 
+	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+	else
+		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+
 	if (is_flip_pending_in_pipes(dc, context))
 		return;
 
@@ -1793,6 +1952,10 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
 			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
 		}
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
+	process_deferred_updates(dc);
+#endif
+
 	dc->hwss.optimize_bandwidth(dc, context);
 
 	dc->optimized_required = false;
@@ -1990,7 +2153,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
 	}
 
 	if (u->plane_info->dcc.enable != u->surface->dcc.enable
-			|| u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
+			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
 		/* During DCC on/off, stutter period is calculated before
 		 * DCC has fully transitioned. This results in incorrect
@@ -2143,6 +2306,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
 		update_flags->bits.gamma_change = 1;
 	}
 
+	if (u->lut3d_func || u->func_shaper)
+		update_flags->bits.lut_3d = 1;
+
 	if (u->hdr_mult.value)
 		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
 			update_flags->bits.hdr_mult = 1;
@@ -2156,6 +2322,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
 
 	if (update_flags->bits.input_csc_change
 			|| update_flags->bits.coeff_reduction_change
+			|| update_flags->bits.lut_3d
 			|| update_flags->bits.gamma_change
 			|| update_flags->bits.gamut_remap_change) {
 		type = UPDATE_TYPE_FULL;
@@ -2214,6 +2381,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
 		if (stream_update->dsc_config)
 			su_flags->bits.dsc_changed = 1;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (stream_update->mst_bw_update)
+			su_flags->bits.mst_bw = 1;
+#endif
+
 		if (su_flags->raw != 0)
 			overall_type = UPDATE_TYPE_FULL;
 
@@ -2591,6 +2763,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
 			if (stream_update->dsc_config)
 				dp_update_dsc_config(pipe_ctx);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+			if (stream_update->mst_bw_update) {
+				if (stream_update->mst_bw_update->is_increase)
+					dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+				else
+					dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+			}
+#endif
+
 			if (stream_update->pending_test_pattern) {
 				dc_link_dp_set_test_pattern(stream->link,
 					stream->test_pattern.type,
@@ -2703,7 +2884,8 @@ static void commit_planes_for_stream(struct dc *dc,
 #endif
 
 	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
-		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+		if (top_pipe_to_program &&
+			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
 			if (should_use_dmub_lock(stream->link)) {
 				union dmub_hw_lock_flags hw_locks = { 0 };
 				struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -2968,6 +3150,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
 			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
 				new_pipe->plane_state->force_full_update = true;
 		}
+	} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+		/*
+		 * Previous frame finished and HW is ready for optimization.
+		 *
+		 * Only relevant for DCN behavior where we can guarantee the optimization
+		 * is safe to apply - retain the legacy behavior for DCE.
+		 */
+		dc_post_update_surfaces_to_stream(dc);
 	}
 
@@ -3024,14 +3214,11 @@ void dc_commit_updates_for_stream(struct dc *dc,
 				pipe_ctx->plane_state->force_full_update = false;
 		}
 	}
-	/*let's use current_state to update watermark etc*/
-	if (update_type >= UPDATE_TYPE_FULL) {
-		dc_post_update_surfaces_to_stream(dc);
 
-		if (dc_ctx->dce_version >= DCE_VERSION_MAX)
-			TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
-		else
-			TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+	/* Legacy optimization path for DCE. */
+	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+		dc_post_update_surfaces_to_stream(dc);
+		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
 	}
 
 	return;
@@ -3334,6 +3521,7 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
 {
 	int i;
+	bool allow_active;
 
 	for (i = 0; i < dc->current_state->stream_count ; i++) {
 		struct dc_link *link;
@@ -3345,10 +3533,12 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
 
 		if (link->psr_settings.psr_feature_enabled) {
 			if (enable && !link->psr_settings.psr_allow_active) {
-				if (!dc_link_set_psr_allow_active(link, true, false, false))
+				allow_active = true;
+				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
 					return false;
 			} else if (!enable && link->psr_settings.psr_allow_active) {
-				if (!dc_link_set_psr_allow_active(link, false, true, false))
+				allow_active = false;
+				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
 					return false;
 			}
 		}
@@ -3432,6 +3622,13 @@ void dc_hardware_release(struct dc *dc)
  */
 bool dc_enable_dmub_notifications(struct dc *dc)
 {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	/* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
+	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+	    !dc->debug.dpia_debug.bits.disable_dpia)
+		return true;
+#endif
 	/* dmub aux needs dmub notifications to be enabled */
 	return dc->debug.enable_dmub_aux_for_legacy_ddc;
 }
@@ -3457,7 +3654,12 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
 	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
 	cmd.dp_aux_access.header.payload_bytes = 0;
-	cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+	/* For dpia, ddc_pin is set to NULL */
+	if (!dc->links[link_index]->ddc->ddc_pin)
+		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
+	else
+		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+
 	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
 	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
 	cmd.dp_aux_access.aux_control.timeout = 0;
@@ -3501,6 +3703,130 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
 	return true;
 }
 
+uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+		uint8_t dpia_port_index)
+{
+	uint8_t index, link_index = 0xFF;
+
+	for (index = 0; index < dc->link_count; index++) {
+		/* ddc_hw_inst has dpia port index for dpia links
+		 * and ddc instance for legacy links
+		 */
+		if (!dc->links[index]->ddc->ddc_pin) {
+			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
+				link_index = index;
+				break;
+			}
+		}
+	}
+	ASSERT(link_index != 0xFF);
+	return link_index;
+}
+
+/**
+ *****************************************************************************
+ *  Function: dc_process_dmub_set_config_async
+ *
+ *  @brief
+ *		Submits set_config command to dmub via inbox message
+ *
+ *  @param
+ *		[in] dc: dc structure
+ *		[in] link_index: link index
+ *		[in] payload: aux payload
+ *		[out] notify: set_config immediate reply
+ *
+ *  @return
+ *		True if successful, False if failure
+ *****************************************************************************
+ */
+bool dc_process_dmub_set_config_async(struct dc *dc,
+				uint32_t link_index,
+				struct set_config_cmd_payload *payload,
+				struct dmub_notification *notify)
+{
+	union dmub_rb_cmd cmd = {0};
+	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+	bool is_cmd_complete = true;
+
+	/* prepare SET_CONFIG command */
+	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
+	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
+
+	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
+	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
+	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
+
+	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
+		/* command is not processed by dmub */
+		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
+		return is_cmd_complete;
+	}
+
+	/* command processed by dmub, if ret_status is 1, it is completed instantly */
+	if (cmd.set_config_access.header.ret_status == 1)
+		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
+	else
+		/* cmd pending, will receive notification via outbox */
+		is_cmd_complete = false;
+
+	return is_cmd_complete;
+}
+
+/**
+ *****************************************************************************
+ *  Function: dc_process_dmub_set_mst_slots
+ *
+ *  @brief
+ *		Submits mst slot allocation command to dmub via inbox message
+ *
+ *  @param
+ *		[in] dc: dc structure
+ *		[in] link_index: link index
+ *		[in] mst_alloc_slots: mst slots to be allotted
+ *		[out] mst_slots_in_use: mst slots in use returned in failure case
+ *
+ *  @return
+ *		DC_OK if successful, DC_ERROR if failure
+ *****************************************************************************
+ */
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+				uint32_t link_index,
+				uint8_t mst_alloc_slots,
+				uint8_t *mst_slots_in_use)
+{
+	union dmub_rb_cmd cmd = {0};
+	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+	/* prepare MST_ALLOC_SLOTS command */
+	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
+	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
+
+	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
+	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
+
+	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
+		/* command is not processed by dmub */
+		return DC_ERROR_UNEXPECTED;
+
+	/* command processed by dmub, if ret_status is 1 */
+	if (cmd.set_config_access.header.ret_status != 1)
+		/* command processing error */
+		return DC_ERROR_UNEXPECTED;
+
+	/* command processed and we have a status of 2, mst not enabled in dpia */
+	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
+		return DC_FAIL_UNSUPPORTED_1;
+
+	/* previously configured mst alloc and used slots did not match */
+	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
+		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
+		return DC_NOT_SUPPORTED;
+	}
+
+	return DC_OK;
+}
+
 /**
  * dc_disable_accelerated_mode - disable accelerated mode
  * @dc: dc structure
@@ -3509,3 +3835,57 @@ void dc_disable_accelerated_mode(struct dc *dc)
 {
 	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
 }
+
+
+/**
+ *****************************************************************************
+ *  dc_notify_vsync_int_state() - notifies vsync enable/disable state
+ *  @dc: dc structure
+ *  @stream: stream where vsync int state changed
+ *  @enable: whether vsync is enabled or disabled
+ *
+ *  Called when vsync is enabled/disabled
+ *  Will notify DMUB to start/stop ABM interrupts after steady state is reached
+ *
+ *****************************************************************************
+ */
+void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
+{
+	int i;
+	int edp_num;
+	struct pipe_ctx *pipe = NULL;
+	struct dc_link *link = stream->sink->link;
+	struct dc_link *edp_links[MAX_NUM_EDP];
+
+
+	if (link->psr_settings.psr_feature_enabled)
+		return;
+
+	/*find primary pipe associated with stream*/
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (pipe->stream == stream && pipe->stream_res.tg)
+			break;
+	}
+
+	if (i == MAX_PIPES) {
+		ASSERT(0);
+		return;
+	}
+
+	get_edp_links(dc, edp_links, &edp_num);
+
+	/* Determine panel inst */
+	for (i = 0; i < edp_num; i++) {
+		if (edp_links[i] == link)
+			break;
+	}
+
+	if (i == edp_num) {
+		return;
+	}
+
+	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
+		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 8bd7f42a8053..faa0bc308fc8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -51,6 +51,8 @@
 #include "inc/link_enc_cfg.h"
 #include "inc/link_dpcd.h"
 
+#include "dc/dcn30/dcn30_vpg.h"
+
 #define DC_LOGGER_INIT(logger)
 
 #define LINK_INFO(...) \
@@ -64,6 +66,31 @@
 /*******************************************************************************
  * Private functions
  ******************************************************************************/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link)
+{
+	struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder(
+			link->dc->res_pool);
+
+	if (!link->hpo_dp_link_enc && enc) {
+		link->hpo_dp_link_enc = enc;
+		link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+		link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+	}
+
+	return (link->hpo_dp_link_enc != NULL);
+}
+
+static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link)
+{
+	if (link->hpo_dp_link_enc) {
+		link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN;
+		link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN;
+		link->hpo_dp_link_enc = NULL;
+	}
+}
+#endif
+
 static void dc_link_destruct(struct dc_link *link)
 {
 	int i;
@@ -91,6 +118,12 @@ static void dc_link_destruct(struct dc_link *link)
 		link->link_enc->funcs->destroy(&link->link_enc);
 	}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (link->hpo_dp_link_enc) {
+		remove_dp_hpo_link_encoder_from_link(link);
+	}
+#endif
+
 	if (link->local_sink)
 		dc_sink_release(link->local_sink);
 
@@ -237,10 +270,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 
 	/* Link may not have physical HPD pin. */
 	if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
-		if (link->hpd_status)
-			*type = dc_connection_single;
-		else
+		if (link->is_hpd_pending || !link->hpd_status)
 			*type = dc_connection_none;
+		else
+			*type = dc_connection_single;
 
 		return true;
 	}
@@ -641,13 +674,13 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
 
 static void read_current_link_settings_on_detect(struct dc_link *link)
 {
-	union lane_count_set lane_count_set = { {0} };
+	union lane_count_set lane_count_set = {0};
 	uint8_t link_bw_set;
 	uint8_t link_rate_set;
 	uint32_t read_dpcd_retry_cnt = 10;
 	enum dc_status status = DC_ERROR_UNEXPECTED;
 	int i;
-	union max_down_spread max_down_spread = { {0} };
+	union max_down_spread max_down_spread = {0};
 
 	// Read DPCD 00101h to find out the number of lanes currently set
 	for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -725,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
 		dal_ddc_service_set_transaction_type(link->ddc,
 						     sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		/* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+		 * reports DSC support.
+		 */
+		if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+				link->type == dc_connection_mst_branch &&
+				link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+				link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+				!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+			link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
 		/* In case of fallback to SST when topology discovery below fails
 		 * HDCP caps will be querried again later by the upper layer (caller
@@ -928,6 +973,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			return false;
 		}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
+			add_dp_hpo_link_encoder_to_link(link);
+#endif
+
 		if (link->type == dc_connection_mst_branch) {
 			LINK_INFO("link=%d, mst branch is now Connected\n",
 				  link->link_index);
@@ -1165,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			LINK_INFO("link=%d, mst branch is now Disconnected\n",
 				  link->link_index);
 
+			/* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+				link->wa_flags.dpia_mst_dsc_always_on = false;
+
 			dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
 
 			link->mst_stream_alloc_table.stream_count = 0;
@@ -1173,6 +1227,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
 			       sizeof(link->mst_stream_alloc_table.stream_allocations));
 		}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+			reset_dp_hpo_stream_encoders_for_link(link);
+#endif
+
 		link->type = dc_connection_none;
 		sink_caps.signal = SIGNAL_TYPE_NONE;
 		/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
@@ -1209,6 +1268,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 		}
 	}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	dc_z10_restore(dc);
+#endif
+
 	/* get out of low power state */
 	if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT)
 		clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
@@ -1378,8 +1441,8 @@ static enum transmitter translate_encoder_to_transmitter(struct graphics_object_
 	}
 }
 
-static bool dc_link_construct(struct dc_link *link,
-			      const struct link_init_data *init_params)
+static bool dc_link_construct_legacy(struct dc_link *link,
+				     const struct link_init_data *init_params)
 {
 	uint8_t i;
 	struct ddc_service_init_data ddc_service_init_data = { { 0 } };
@@ -1549,6 +1612,9 @@ static bool dc_link_construct(struct dc_link *link,
 	}
 
 	DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+#endif
 
 	/* Update link encoder tracking variables. These are used for the dynamic
 	 * assignment of link encoders to streams.
@@ -1610,6 +1676,14 @@ static bool dc_link_construct(struct dc_link *link,
 			DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
 			DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
 		}
+
+		if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+			link->bios_forced_drive_settings.VOLTAGE_SWING =
+					(info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
+			link->bios_forced_drive_settings.PRE_EMPHASIS =
+					((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
+		}
+
 		break;
 	}
 
@@ -1651,6 +1725,80 @@ create_fail:
 	return false;
 }
 
+static bool dc_link_construct_dpia(struct dc_link *link,
+				   const struct link_init_data *init_params)
+{
+	struct ddc_service_init_data ddc_service_init_data = { { 0 } };
+	struct dc_context *dc_ctx = init_params->ctx;
+
+	DC_LOGGER_INIT(dc_ctx->logger);
+
+	/* Initialized irq source for hpd and hpd rx */
+	link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+	link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+	link->link_status.dpcd_caps = &link->dpcd_caps;
+
+	link->dc = init_params->dc;
+	link->ctx = dc_ctx;
+	link->link_index = init_params->link_index;
+
+	memset(&link->preferred_training_settings, 0,
+	       sizeof(struct dc_link_training_overrides));
+	memset(&link->preferred_link_setting, 0,
+	       sizeof(struct dc_link_settings));
+
+	/* Dummy Init for linkid */
+	link->link_id.type = OBJECT_TYPE_CONNECTOR;
+	link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
+	link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
+	link->is_internal_display = false;
+	link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+	LINK_INFO("Connector[%d] description:signal %d\n",
+		  init_params->connector_index,
+		  link->connector_signal);
+
+	link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
+	link->is_dig_mapping_flexible = true;
+
+	/* TODO: Initialize link : funcs->link_init */
+
+	ddc_service_init_data.ctx = link->ctx;
+	ddc_service_init_data.id = link->link_id;
+	ddc_service_init_data.link = link;
+	/* Set indicator for dpia link so that ddc won't be created */
+	ddc_service_init_data.is_dpia_link = true;
+
+	link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+	if (!link->ddc) {
+		DC_ERROR("Failed to create ddc_service!\n");
+		goto ddc_create_fail;
+	}
+
+	/* Set dpia port index : 0 to number of dpia ports */
+	link->ddc_hw_inst = init_params->connector_index;
+
+	/* TODO: Create link encoder */
+
+	link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+	/* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
+	link->wa_flags.dp_mot_reset_segment = true;
+
+	return true;
+
+ddc_create_fail:
+	return false;
+}
+
+static bool dc_link_construct(struct dc_link *link,
+			      const struct link_init_data *init_params)
+{
+	/* Handle dpia case */
+	if (init_params->is_dpia_link)
+		return dc_link_construct_dpia(link, init_params);
+	else
+		return dc_link_construct_legacy(link, init_params);
+}
 /*******************************************************************************
  * Public functions
  ******************************************************************************/
@@ -1741,17 +1889,47 @@ static enum dc_status enable_link_dp(struct dc_state *state,
 	/* get link settings for video mode timing */
 	decide_link_settings(stream, &link_settings);
 
+	/* Train with fallback when enabling DPIA link. Conventional links are
+	 * trained with fallback during sink detection.
+	 */
+	if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+		do_fallback = true;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	/*
+	 * Temporary w/a to get DP2.0 link rates to work with SST.
+	 * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
+	 */
+	if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING &&
+			pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+			link->dc->debug.set_mst_en_for_sst) {
+		dp_enable_mst_on_sink(link, true);
+	}
+#endif
+
 	if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
 		/*in case it is not on*/
 		link->dc->hwss.edp_power_control(link, true);
 		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
 	}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
+		/* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
+	} else {
+		pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+				link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+		if (state->clk_mgr && !apply_seamless_boot_optimization)
+			state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+					state, false);
+	}
+#else
 	pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
-		link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+			link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
 	if (state->clk_mgr && !apply_seamless_boot_optimization)
 		state->clk_mgr->funcs->update_clocks(state->clk_mgr,
-							state, false);
+				state, false);
+#endif
 
 	// during mode switch we do DP_SET_POWER off then on, and OUI is lost
 	dpcd_set_source_specific_data(link);
@@ -1780,7 +1958,12 @@ static enum dc_status enable_link_dp(struct dc_state *state,
 	else
 		fec_enable = true;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+		dp_set_fec_enable(link, fec_enable);
+#else
 	dp_set_fec_enable(link, fec_enable);
+#endif
 
 	// during mode set we do DP_SET_POWER off then on, aux writes are lost
 	if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -1832,6 +2015,57 @@ static enum dc_status enable_link_dp_mst(
 	return enable_link_dp(state, pipe_ctx);
 }
 
+void dc_link_blank_all_dp_displays(struct dc *dc)
+{
+	unsigned int i;
+	uint8_t dpcd_power_state = '\0';
+	enum dc_status status = DC_ERROR_UNEXPECTED;
+
+	for (i = 0; i < dc->link_count; i++) {
+		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
+			(dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
+			continue;
+
+		/* DP 2.0 spec requires that we read LTTPR caps first */
+		dp_retrieve_lttpr_cap(dc->links[i]);
+		/* if any of the displays are lit up turn them off */
+		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+							&dpcd_power_state, sizeof(dpcd_power_state));
+
+		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+			dc_link_blank_dp_stream(dc->links[i], true);
+	}
+
+}
+
+void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
+{
+	unsigned int j;
+	struct dc *dc = link->ctx->dc;
+	enum signal_type signal = link->connector_signal;
+
+	if ((signal == SIGNAL_TYPE_EDP) ||
+		(signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+		if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+			link->link_enc->funcs->get_dig_frontend &&
+			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+			unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+			if (fe != ENGINE_ID_UNKNOWN)
+				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+					if (fe == dc->res_pool->stream_enc[j]->id) {
+						dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
+									dc->res_pool->stream_enc[j]);
+						break;
+					}
+				}
+		}
+
+		if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+			dp_receiver_power_ctrl(link, false);
+	}
+}
+
 static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
 				  enum engine_id eng_id,
 				  struct ext_hdmi_settings *settings)
@@ -2284,6 +2518,9 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 
 	if (dc_is_dp_signal(signal)) {
 		/* SST DP, eDP */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
 		if (dc_is_dp_sst_signal(signal))
 			dp_disable_link_phy(link, signal);
 		else
@@ -2291,8 +2528,15 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 
 		if (dc_is_dp_sst_signal(signal) ||
 				link->mst_stream_alloc_table.stream_count == 0) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+			if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
+				dp_set_fec_enable(link, false);
+				dp_set_fec_ready(link, false);
+			}
+#else
 			dp_set_fec_enable(link, false);
 			dp_set_fec_ready(link, false);
+#endif
 		}
 	} else {
 		if (signal != SIGNAL_TYPE_VIRTUAL)
@@ -2475,9 +2719,14 @@ static bool dp_active_dongle_validate_timing(
 		break;
 	}
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
+			dongle_caps->extendedCapValid == true) {
+#else
 	if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
 		dongle_caps->extendedCapValid == false)
 		return true;
+#endif
 
 	/* Check Pixel Encoding */
 	switch (timing->pixel_encoding) {
@@ -2520,6 +2769,89 @@ static bool dp_active_dongle_validate_timing(
 	if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
 		return false;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	}
+
+	if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
+			dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
+			dongle_caps->dfp_cap_ext.supported) {
+
+		if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
+			return false;
+
+		if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
+			return false;
+
+		if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
+			return false;
+
+		if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_666 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
+				return false;
+		} else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+			if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+				return false;
+			if (timing->display_color_depth == COLOR_DEPTH_888 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
+				return false;
+			else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+					!dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
+				return false;
+		}
+	}
+#endif
+
	return true;
 }
@@ -2586,13 +2918,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 int dc_link_get_backlight_level(const struct dc_link *link)
 {
	struct abm *abm = get_abm_from_stream_res(link);
+	struct panel_cntl *panel_cntl = link->panel_cntl;
+	struct dc *dc = link->ctx->dc;
+	struct dmcu *dmcu = dc->res_pool->dmcu;
+	bool fw_set_brightness = true;
-	if (abm == NULL || abm->funcs->get_current_backlight == NULL)
-		return DC_ERROR_UNEXPECTED;
+	if (dmcu)
+		fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
-	return (int) abm->funcs->get_current_backlight(abm);
+	if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+		return panel_cntl->funcs->get_current_backlight(panel_cntl);
+	else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+		return (int) abm->funcs->get_current_backlight(abm);
+	else
+		return DC_ERROR_UNEXPECTED;
 }
 int dc_link_get_target_backlight_pwm(const struct dc_link *link)
@@ -2654,8 +2994,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
	return true;
 }
-bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
-		bool wait, bool force_static)
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+		bool wait, bool force_static, const unsigned int *power_opts)
 {
	struct dc *dc = link->ctx->dc;
	struct dmcu *dmcu = dc->res_pool->dmcu;
@@ -2668,20 +3008,33 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
	if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
		return false;
-	link->psr_settings.psr_allow_active = allow_active;
+	/* Set power optimization flag */
+	if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
+		link->psr_settings.psr_power_opt = *power_opts;
+
+		if (psr != NULL && link->psr_settings.psr_feature_enabled
&& psr->funcs->psr_set_power_opt) + psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); + } + + /* Enable or Disable PSR */ + if (allow_active && link->psr_settings.psr_allow_active != *allow_active) { + link->psr_settings.psr_allow_active = *allow_active; + #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!allow_active) - dc_z10_restore(dc); + if (!link->psr_settings.psr_allow_active) + dc_z10_restore(dc); #endif - if (psr != NULL && link->psr_settings.psr_feature_enabled) { - if (force_static && psr->funcs->psr_force_static) - psr->funcs->psr_force_static(psr, panel_inst); - psr->funcs->psr_enable(psr, allow_active, wait, panel_inst); - } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled) - dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); - else - return false; + if (psr != NULL && link->psr_settings.psr_feature_enabled) { + if (force_static && psr->funcs->psr_force_static) + psr->funcs->psr_force_static(psr, panel_inst); + psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst); + } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && + link->psr_settings.psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait); + else + return false; + } return true; } @@ -2970,10 +3323,12 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) static void update_mst_stream_alloc_table( struct dc_link *link, struct stream_encoder *stream_enc, +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? +#endif const struct dp_mst_stream_allocation_table *proposed_table) { - struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { - { 0 } }; + struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; struct link_mst_stream_allocation *dc_alloc; int i; @@ -3006,6 +3361,9 @@ static void update_mst_stream_alloc_table( work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; work_table[i].stream_enc = stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; +#endif } } @@ -3016,6 +3374,108 @@ static void update_mst_stream_alloc_table( link->mst_stream_alloc_table.stream_allocations[i] = work_table[i]; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) +{ + const uint32_t VCP_Y_PRECISION = 1000; + uint64_t vcp_x, vcp_y; + + // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision + avg_time_slots_per_mtp = dc_fixpt_add( + avg_time_slots_per_mtp, dc_fixpt_from_fraction(1, 2 * VCP_Y_PRECISION)); + + vcp_x = dc_fixpt_floor(avg_time_slots_per_mtp); + vcp_y = dc_fixpt_floor( + dc_fixpt_mul_int( + dc_fixpt_sub_int(avg_time_slots_per_mtp, dc_fixpt_floor(avg_time_slots_per_mtp)), + VCP_Y_PRECISION)); + + if (link->type == dc_connection_mst_branch) + DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream " + "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION); + else + DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream " + "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION); +} + +/* + * Payload allocation/deallocation for SST introduced in DP2.0 + */ +enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = 
stream->link; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + DC_LOGGER_INIT(link->ctx->logger); + + /* slot X.Y for SST payload deallocate */ + if (!allocate) { + avg_time_slots_per_mtp = dc_fixpt_from_int(0); + + dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + } + + /* calculate VC payload and update branch with new payload allocation table*/ + if (!dpcd_write_128b_132b_sst_payload_allocation_table( + stream, + link, + &proposed_table, + allocate)) { + DC_LOG_ERROR("SST Update Payload: Failed to update " + "allocation table for " + "pipe idx: %d\n", + pipe_ctx->pipe_idx); + } + + proposed_table.stream_allocations[0].hpo_dp_stream_enc = hpo_dp_stream_encoder; + + ASSERT(proposed_table.stream_count == 1); + + //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id + DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p " + "vcp_id: %d " + "slot_count: %d\n", + (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, + proposed_table.stream_allocations[0].vcp_id, + proposed_table.stream_allocations[0].slot_count); + + /* program DP source TX for payload */ + hpo_dp_link_encoder->funcs->update_stream_allocation_table( + hpo_dp_link_encoder, + &proposed_table); + + /* poll for ACT handled */ + if (!dpcd_poll_for_allocation_change_trigger(link)) { + // Failures will result in blackscreen and errors logged + BREAK_TO_DEBUGGER(); + } + + /* slot X.Y for SST payload allocate */ + if (allocate) { + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); + + dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + } + + /* Always return DC_OK. + * If part of sequence fails, log failure(s) and show blackscreen + */ + return DC_OK; +} +#endif /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm @@ -3024,16 +3484,27 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; - struct link_encoder *link_encoder = link->link_enc; + struct link_encoder *link_encoder = NULL; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; +#endif struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; - uint8_t i; + int i; enum act_return_status ret; DC_LOGGER_INIT(link->ctx->logger); + /* Link encoder may have been dynamically assigned to non-physical display endpoint. 
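+	 * For a physical endpoint the encoder is fixed at link construction; for
+	 * DIG-flexible endpoints, query the assignment table for the encoder
+	 * currently driving this stream.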
*/ + if (link->ep_type == DISPLAY_ENDPOINT_PHY) + link_encoder = link->link_enc; + else if (link->dc->res_pool->funcs->link_encs_assign) + link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream); + ASSERT(link_encoder); + /* enable_link_dp_mst already check link->enabled_stream_count * and stream is in link->stream[]. This is called during set mode, * stream_enc is available. @@ -3046,7 +3517,14 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) &proposed_table, true)) { update_mst_stream_alloc_table( +#if defined(CONFIG_DRM_AMD_DC_DCN) + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); +#else link, pipe_ctx->stream_res.stream_enc, &proposed_table); +#endif } else DC_LOG_WARNING("Failed to update" @@ -3060,23 +3538,70 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#else + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#endif } ASSERT(proposed_table.stream_count > 0); + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + static enum dc_status status; + uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF; + + for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++) + mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count; + + status = dc_process_dmub_set_mst_slots(link->dc, link->link_index, + mst_alloc_slots, &prev_mst_slots_in_use); + ASSERT(status == DC_OK); + DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n", + status, mst_alloc_slots, prev_mst_slots_in_use); + } + /* program DP source TX for payload */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + &link->mst_stream_alloc_table); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->update_stream_allocation_table( + hpo_dp_link_encoder, + &link->mst_stream_alloc_table); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else link_encoder->funcs->update_mst_stream_allocation_table( link_encoder, &link->mst_stream_alloc_table); +#endif /* send down message */ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -3099,26 +3624,215 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) pbn = get_pbn_from_timing(pipe_ctx); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + 
avg_time_slots_per_mtp); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else stream_encoder->funcs->set_throttled_vcp_size( stream_encoder, avg_time_slots_per_mtp); +#endif return DC_OK; } -static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; struct link_encoder *link_encoder = link->link_enc; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); uint8_t i; + enum act_return_status ret; + DC_LOGGER_INIT(link->ctx->logger); + + /* decrease throttled vcp size */ + pbn_per_slot = get_pbn_per_slot(stream); + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + avg_time_slots_per_mtp); + + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + + /* notify immediate branch device table update */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) { + /* update mst stream allocation table software state */ + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + } else { + DC_LOG_WARNING("Failed to update" + "MST allocation table for" + "pipe idx:%d\n", + pipe_ctx->pipe_idx); + } + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* update mst stream allocation table hardware state */ + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + return DC_OK; +} + +enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; + struct link_encoder *link_encoder = link->link_enc; + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; + struct dp_mst_stream_allocation_table proposed_table = {0}; + uint8_t i; + enum act_return_status ret; + DC_LOGGER_INIT(link->ctx->logger); + + /* notify immediate branch device table update */ + if 
(dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) { + /* update mst stream allocation table software state */ + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + } + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* update mst stream allocation table hardware state */ + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + if (ret != ACT_LINK_LOST) { + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } + + /* increase throttled vcp size */ + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); + pbn_per_slot = get_pbn_per_slot(stream); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + avg_time_slots_per_mtp); + + return DC_OK; +} +#endif + +static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct link_encoder *link_encoder = NULL; + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; +#endif + struct dp_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); + int i; bool mst_mode = (link->type == dc_connection_mst_branch); DC_LOGGER_INIT(link->ctx->logger); + /* Link encoder may have been dynamically assigned to non-physical display endpoint. */ + if (link->ep_type == DISPLAY_ENDPOINT_PHY) + link_encoder = link->link_enc; + else if (link->dc->res_pool->funcs->link_encs_assign) + link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream); + ASSERT(link_encoder); + /* deallocate_mst_payload is called before disable link. When mode or * disable/enable monitor, new stream is created which is not in link * stream[] yet. 
For this, payload is not allocated yet, so de-alloc @@ -3127,9 +3841,28 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) */ /* slot X.Y */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + avg_time_slots_per_mtp); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( + hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + avg_time_slots_per_mtp); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else stream_encoder->funcs->set_throttled_vcp_size( stream_encoder, avg_time_slots_per_mtp); +#endif /* TODO: which component is responsible for remove payload table? */ if (mst_mode) { @@ -3139,8 +3872,16 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) &proposed_table, false)) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); +#else update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, &proposed_table); +#endif } else { DC_LOG_WARNING("Failed to update" @@ -3156,20 +3897,67 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { +#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#else + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); +#endif + } + + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + enum dc_status status; + uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF; + + for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++) + mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count; + + status = dc_process_dmub_set_mst_slots(link->dc, link->link_index, + mst_alloc_slots, &prev_mst_slots_in_use); + ASSERT(status != DC_NOT_SUPPORTED); + DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n", + status, mst_alloc_slots, prev_mst_slots_in_use); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { + case DP_8b_10b_ENCODING: + link_encoder->funcs->update_mst_stream_allocation_table( + link_encoder, + &link->mst_stream_alloc_table); + break; + case DP_128b_132b_ENCODING: + hpo_dp_link_encoder->funcs->update_stream_allocation_table( + hpo_dp_link_encoder, + &link->mst_stream_alloc_table); + break; + case DP_UNKNOWN_ENCODING: + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } +#else link_encoder->funcs->update_mst_stream_allocation_table( link_encoder, &link->mst_stream_alloc_table); +#endif if (mst_mode) { 
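		/* Poll for ACT (allocation change trigger) handled by the immediate branch device. */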
dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -3190,6 +3978,10 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) { struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct link_encoder *link_enc = NULL; +#endif + if (cp_psp && cp_psp->funcs.update_stream_config) { struct cp_psp_stream_config config = {0}; enum dp_panel_mode panel_mode = @@ -3201,8 +3993,81 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; #if defined(CONFIG_DRM_AMD_DC_DCN) config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; - config.link_enc_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; - config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY || + pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + link_enc = pipe_ctx->stream->link->link_enc; + config.dio_output_type = pipe_ctx->stream->link->ep_type; + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) + link_enc = pipe_ctx->stream->link->link_enc; + else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_stream( + pipe_ctx->stream->ctx->dc, + pipe_ctx->stream); + } + ASSERT(link_enc); + + // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 + config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + // Add flag to guard new A0 DIG mapping + if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && + pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) { + config.dig_be = link_enc->preferred_engine; + config.dio_output_type = pipe_ctx->stream->link->ep_type; + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + } else { + config.dio_output_type = 0; + config.dio_output_idx = 0; + } + + // Add flag to guard B0 implementation + if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && + link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + // enum ID 1-4 maps to DPIA PHY ID 0-3 + config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; + } else { // for non DPIA mode over B0, ABCDE maps to 01564 + + switch (link_enc->transmitter) { + case TRANSMITTER_UNIPHY_A: + config.phy_idx = 0; + break; + case TRANSMITTER_UNIPHY_B: + config.phy_idx = 1; + break; + case TRANSMITTER_UNIPHY_C: + config.phy_idx = 5; + break; + case TRANSMITTER_UNIPHY_D: + config.phy_idx = 6; + break; + case TRANSMITTER_UNIPHY_E: + config.phy_idx = 4; + break; + default: + config.phy_idx = 0; + break; + } + + } + } + } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_stream( + pipe_ctx->stream->ctx->dc, + pipe_ctx->stream); + config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. 
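+					 * A DIG-flexible endpoint has no fixed PHY, so report 0
+					 * to PSP rather than a stale transmitter index.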
*/ + } + ASSERT(link_enc); + if (link_enc) + config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + if (is_dp_128b_132b_signal(pipe_ctx)) { + config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; + config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst; + config.dp2_enabled = 1; + } #endif config.dpms_off = dpms_off; config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; @@ -3214,15 +4079,103 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) } #endif +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + uint8_t req_slot_count = 0; + uint8_t vc_id = 1; /// VC ID always 1 for SST + + struct dc_link_settings link_settings = {0}; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + decide_link_settings(stream, &link_settings); + stream->link->cur_link_settings = link_settings; + + /* Enable clock, Configure lane count, and Enable Link Encoder*/ + enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings); + +#ifdef DIAGS_BUILD + /* Workaround for FPGA HPO capture DP link data: + * HPO capture will set link to active mode + * This workaround is required to get a capture from start of frame + */ + if (!dc->debug.fpga_hpo_capture_en) { + struct encoder_set_dp_phy_pattern_param params = {0}; + params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE; + + /* Set link active */ + stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern( + stream->link->hpo_dp_link_enc, + ¶ms); + } +#endif + + /* Enable DP_STREAM_ENC */ + dc->hwss.enable_stream(pipe_ctx); + + /* Set DPS PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + dp_set_dsc_pps_sdp(pipe_ctx, true, true); + } + + /* Allocate Payload */ + if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) { + // MST case + uint8_t i; + + proposed_table.stream_count = state->stream_count; + for (i = 0; i < state->stream_count; i++) { + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_allocations[i].slot_count = req_slot_count; + proposed_table.stream_allocations[i].vcp_id = i+1; + /* NOTE: This makes assumption that pipe_ctx index is same as stream index */ + proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc; + } + } else { + // SST case + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_count = 1; /// Always 1 stream for SST + proposed_table.stream_allocations[0].slot_count = req_slot_count; + proposed_table.stream_allocations[0].vcp_id = vc_id; + proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + } + + stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table( + stream->link->hpo_dp_link_enc, + &proposed_table); + + stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size( + stream->link->hpo_dp_link_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst, + avg_time_slots_per_mtp); + + + + dc->hwss.unblank_stream(pipe_ctx, 
&stream->link->cur_link_settings); +} +#endif + void core_link_enable_stream( struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->sink->link; enum dc_status status; + struct link_encoder *link_enc; #if defined(CONFIG_DRM_AMD_DC_DCN) enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; + struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + + if (is_dp_128b_132b_signal(pipe_ctx)) + vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; #endif DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -3230,23 +4183,57 @@ void core_link_enable_stream( dc_is_virtual_signal(pipe_ctx->stream->signal)) return; + if (dc->res_pool->funcs->link_encs_assign && stream->link->ep_type != DISPLAY_ENDPOINT_PHY) + link_enc = link_enc_cfg_get_link_enc_used_by_stream(dc, stream); + else + link_enc = stream->link->link_enc; + ASSERT(link_enc); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!dc_is_virtual_signal(pipe_ctx->stream->signal) + && !is_dp_128b_132b_signal(pipe_ctx)) { +#else if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { - stream->link->link_enc->funcs->setup( - stream->link->link_enc, - pipe_ctx->stream->signal); +#endif + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst, stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE); } - if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute( + pipe_ctx->stream_res.hpo_dp_stream_enc, + &stream->timing, + stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, + stream->timing.flags.DSC, + false); + otg_out_dest = OUT_MUX_HPO_DP; + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( + pipe_ctx->stream_res.stream_enc, + &stream->timing, + stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, + stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + } +#else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( pipe_ctx->stream_res.stream_enc, &stream->timing, stream->output_color_space, stream->use_vsc_sdp_for_colorimetry, stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); +#endif + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute( @@ -3280,9 +4267,18 @@ void core_link_enable_stream( pipe_ctx->stream->apply_edp_fast_boot_optimization = false; +#if defined(CONFIG_DRM_AMD_DC_DCN) + // Enable VPG before building infoframe + if (vpg && vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); +#endif + resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + /* Do not touch link on seamless boot optimization. */ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; @@ -3305,7 +4301,8 @@ void core_link_enable_stream( /* eDP lit up by bios already, no need to enable again. 
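	 * Fast boot is reused only when the mode needs neither DSC nor ODM
	 * combine; see the timing.flags.DSC and next_odm_pipe checks below.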
*/ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization && - !pipe_ctx->stream->timing.flags.DSC) { + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, false); @@ -3342,6 +4339,8 @@ void core_link_enable_stream( */ if (status != DC_FAIL_DP_LINK_TRAINING || pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, pipe_ctx->stream->signal); BREAK_TO_DEBUGGER(); return; } @@ -3357,10 +4356,16 @@ void core_link_enable_stream( * as a workaround for the incorrect value being applied * from transmitter control. */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || + is_dp_128b_132b_signal(pipe_ctx))) +#else if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) - stream->link->link_enc->funcs->setup( - stream->link->link_enc, - pipe_ctx->stream->signal); +#endif + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); dc->hwss.enable_stream(pipe_ctx); @@ -3369,12 +4374,17 @@ void core_link_enable_stream( if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { dp_set_dsc_on_rx(pipe_ctx, true); - dp_set_dsc_pps_sdp(pipe_ctx, true); + dp_set_dsc_pps_sdp(pipe_ctx, true, true); } } if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + is_dp_128b_132b_signal(pipe_ctx)) + dc_link_update_sst_payload(pipe_ctx, true); +#endif dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); @@ -3391,6 +4401,11 @@ void core_link_enable_stream( dc->hwss.enable_audio_stream(pipe_ctx); } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx); + } +#endif if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); @@ -3407,6 +4422,12 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + + if (is_dp_128b_132b_signal(pipe_ctx)) + vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; +#endif if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) @@ -3426,6 +4447,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + is_dp_128b_132b_signal(pipe_ctx)) + dc_link_update_sst_payload(pipe_ctx, false); +#endif if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { struct ext_hdmi_settings settings = {0}; @@ -3452,14 +4478,44 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) } } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + !is_dp_128b_132b_signal(pipe_ctx)) { + + /* In DP1.x SST mode, our encoder will go to TPS1 + * when link is on but stream is off. 
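+	 * (TPS1 is the clock-recovery training pattern, so a sink that sees it
+	 * may assume link training has restarted.)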
+ * Disabling link before stream will avoid exposing TPS1 pattern + * during the disable sequence as it will confuse some receivers + * state machine. + * In DP2 or MST mode, our encoder will stay video active + */ + disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + dc->hwss.disable_stream(pipe_ctx); + } else { + dc->hwss.disable_stream(pipe_ctx); + disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + } +#else disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); dc->hwss.disable_stream(pipe_ctx); +#endif if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); + } +#endif + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (vpg && vpg->funcs->vpg_powerdown) + vpg->funcs->vpg_powerdown(vpg); +#endif } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) @@ -3592,6 +4648,13 @@ void dc_link_set_preferred_training_settings(struct dc *dc, if (link_setting != NULL) { link->preferred_link_setting = *link_setting; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(link_setting) == + DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) { + if (!add_dp_hpo_link_encoder_to_link(link)) + memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting)); + } +#endif } else { link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; @@ -3633,6 +4696,38 @@ uint32_t dc_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_setting) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint32_t total_data_bw_efficiency_x10000 = 0; + uint32_t link_rate_per_lane_kbps = 0; + + switch (dp_get_link_encoding_format(link_setting)) { + case DP_8b_10b_ENCODING: + /* For 8b/10b encoding: + * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane. + * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported. + */ + link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; + if (dc_link_should_enable_fec(link)) { + total_data_bw_efficiency_x10000 /= 100; + total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; + } + break; + case DP_128b_132b_ENCODING: + /* For 128b/132b encoding: + * link rate is defined in the unit of 10mbps per lane. + * total data bandwidth efficiency is always 96.71%. 
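+	 * Worked example (sketch): UHBR10 x4 stores link_rate = 1000, giving
+	 * 1000 * 10000 = 10,000,000 kbps per lane; 10,000,000 * 4 / 10000 * 9671
+	 * = 38,684,000 kbps (~38.7 Gbps) of effective payload bandwidth.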
+ */ + link_rate_per_lane_kbps = link_setting->link_rate * 10000; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; + break; + default: + break; + } + + /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ + return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000; +#else uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */ @@ -3663,9 +4758,9 @@ uint32_t dc_link_bandwidth_kbps( long long fec_link_bw_kbps = link_bw_kbps * 970LL; link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL)); } - return link_bw_kbps; +#endif } const struct dc_link_settings *dc_link_get_link_cap( @@ -3692,14 +4787,14 @@ bool dc_link_is_fec_supported(const struct dc_link *link) */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->dc, link->dc->current_state); + link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); } else link_enc = link->link_enc; ASSERT(link_enc); - return (dc_is_dp_signal(link->connector_signal) && + return (dc_is_dp_signal(link->connector_signal) && link_enc && link_enc->features.fec_supported && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); @@ -3713,8 +4808,10 @@ bool dc_link_should_enable_fec(const struct dc_link *link) if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && link->local_sink && link->local_sink->edid_caps.panel_patch.disable_fec) || - (link->connector_signal == SIGNAL_TYPE_EDP && - link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP + (link->connector_signal == SIGNAL_TYPE_EDP + // enable FEC for EDP if DSC is supported + && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false + )) is_fec_disable = true; if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable) @@ -3735,7 +4832,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing( timing->dsc_cfg.bits_per_pixel, timing->dsc_cfg.num_slices_h, timing->dsc_cfg.is_dp); -#endif +#endif /* CONFIG_DRM_AMD_DC_DCN */ switch (timing->display_color_depth) { case COLOR_DEPTH_666: diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index ba6b56f20269..24dc662ec3e4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -196,7 +196,8 @@ static void ddc_service_construct( ddc_service->link = init_data->link; ddc_service->ctx = init_data->ctx; - if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) { + if (init_data->is_dpia_link || + dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info) != BP_RESULT_OK) { ddc_service->ddc_pin = NULL; } else { DC_LOGGER_INIT(ddc_service->ctx->logger); @@ -553,6 +554,7 @@ bool dal_ddc_service_query_ddc_data( payload.address = address; payload.reply = NULL; payload.defer_delay = get_defer_delay(ddc); + payload.write_status_update = false; if (write_size != 0) { payload.write = true; @@ -624,24 +626,24 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, do { struct aux_payload current_payload; bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= - payload->length; + 
payload->length; + uint32_t payload_length = is_end_of_payload ? + payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; current_payload.address = payload->address; current_payload.data = &payload->data[retrieved]; current_payload.defer_delay = payload->defer_delay; current_payload.i2c_over_aux = payload->i2c_over_aux; - current_payload.length = is_end_of_payload ? - payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; - /* set mot (middle of transaction) to false - * if it is the last payload - */ + current_payload.length = payload_length; + /* set mot (middle of transaction) to false if it is the last payload */ current_payload.mot = is_end_of_payload ? payload->mot:true; + current_payload.write_status_update = false; current_payload.reply = payload->reply; current_payload.write = payload->write; ret = dc_link_aux_transfer_with_retries(ddc, ¤t_payload); - retrieved += current_payload.length; + retrieved += payload_length; } while (retrieved < payload->length && ret == true); return ret; @@ -658,10 +660,12 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) { - if (dc_enable_dmub_notifications(ddc->ctx->dc)) + if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc || + !ddc->ddc_pin) { return dce_aux_transfer_dmub_raw(ddc, payload, operation_result); - else + } else { return dce_aux_transfer_raw(ddc, payload, operation_result); + } } /* dc_link_aux_transfer_with_retries() - Attempt to submit an @@ -760,7 +764,7 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service) dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), &tmds_config, sizeof(tmds_config)); if (tmds_config & 0x1) { - union hdmi_scdc_status_flags_data status_data = { {0} }; + union hdmi_scdc_status_flags_data status_data = {0}; uint8_t scramble_status = 0; offset = HDMI_SCDC_SCRAMBLER_STATUS; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 330edd666b7d..297553074bfd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1,4 +1,26 @@ -/* Copyright 2015 Advanced Micro Devices, Inc. */ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + */ #include "dm_services.h" #include "dc.h" #include "dc_link_dp.h" @@ -14,6 +36,7 @@ #include "dpcd_defs.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" +#include "inc/dc_link_dpia.h" #include "inc/link_enc_cfg.h" /*Travis*/ @@ -39,6 +62,43 @@ enum { POST_LT_ADJ_REQ_TIMEOUT = 200 }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct dp_lt_fallback_entry { + enum dc_lane_count lane_count; + enum dc_link_rate link_rate; +}; + +static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { + /* This link training fallback array is ordered by + * link bandwidth from highest to lowest. + * DP specs makes it a normative policy to always + * choose the next highest link bandwidth during + * link training fallback. + */ + {LANE_COUNT_FOUR, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR20}, + {LANE_COUNT_TWO, LINK_RATE_UHBR10}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH2}, + {LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_HIGH2}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_HIGH3}, + {LANE_COUNT_FOUR, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH2}, + {LANE_COUNT_TWO, LINK_RATE_HIGH}, + {LANE_COUNT_TWO, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_LOW}, +}; +#endif + static bool decide_fallback_link_setting( struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, @@ -46,21 +106,37 @@ static bool decide_fallback_link_setting( static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b); +static void maximize_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); +static void override_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, const struct dc_link_settings *link_settings) { union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 100; - +#if defined(CONFIG_DRM_AMD_DC_DCN) memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } +#else core_link_read_dpcd( link, DP_TRAINING_AUX_RD_INTERVAL, (uint8_t *)&training_rd_interval, sizeof(training_rd_interval)); if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; +#endif return wait_in_micro_secs; } @@ -68,6 +144,36 @@ static uint32_t get_eq_training_aux_rd_interval( struct dc_link *link, const struct dc_link_settings *link_settings) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + union training_aux_rd_interval training_rd_interval; + + memset(&training_rd_interval, 0, 
sizeof(training_rd_interval)); + if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + core_link_read_dpcd( + link, + DP_128b_132b_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } + + switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { + case 0: return 400; + case 1: return 4000; + case 2: return 8000; + case 3: return 12000; + case 4: return 16000; + case 5: return 32000; + case 6: return 64000; + default: return 400; + } +#else union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 400; @@ -87,13 +193,21 @@ static uint32_t get_eq_training_aux_rd_interval( } return wait_in_micro_secs; +#endif } void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (wait_in_micro_secs > 16000) + msleep(wait_in_micro_secs/1000); + else + udelay(wait_in_micro_secs); +#else udelay(wait_in_micro_secs); +#endif DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, @@ -121,6 +235,17 @@ enum dpcd_training_patterns case DP_TRAINING_PATTERN_SEQUENCE_4: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_TPS1: + dpcd_tr_pattern = DPCD_128b_132b_TPS1; + break; + case DP_128b_132b_TPS2: + dpcd_tr_pattern = DPCD_128b_132b_TPS2; + break; + case DP_128b_132b_TPS2_CDS: + dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; + break; +#endif case DP_TRAINING_PATTERN_VIDEOIDLE: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; break; @@ -138,7 +263,7 @@ static void dpcd_set_training_pattern( struct dc_link *link, enum dc_dp_training_pattern training_pattern) { - union dpcd_training_pattern dpcd_pattern = { {0} }; + union dpcd_training_pattern dpcd_pattern = {0}; dpcd_pattern.v1_4.TRAINING_PATTERN_SET = dc_dp_training_pattern_to_dpcd_training_pattern( @@ -159,13 +284,57 @@ static void dpcd_set_training_pattern( static enum dc_dp_training_pattern decide_cr_training_pattern( const struct dc_link_settings *link_settings) { - return DP_TRAINING_PATTERN_SEQUENCE_1; + switch (dp_get_link_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + default: + return DP_TRAINING_PATTERN_SEQUENCE_1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_ENCODING: + return DP_128b_132b_TPS1; +#endif + } } static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, const struct dc_link_settings *link_settings) { struct link_encoder *link_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct encoder_feature_support *enc_caps; + struct dpcd_caps *rx_caps = &link->dpcd_caps; + enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + + /* Access link encoder capability based on whether it is statically + * or dynamically assigned to a link. 
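+ * (With a flexible DIG mapping the encoder is looked up through the link_enc_cfg helpers below, instead of being taken from link->link_enc directly.)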
+ */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + else + link_enc = link->link_enc; + ASSERT(link_enc); + enc_caps = &link_enc->features; + + switch (dp_get_link_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + if (enc_caps->flags.bits.IS_TPS4_CAPABLE && + rx_caps->max_down_spread.bits.TPS4_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && + rx_caps->max_ln_count.bits.TPS3_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_3; + else + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + case DP_128b_132b_ENCODING: + pattern = DP_128b_132b_TPS2; + break; + default: + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + } + return pattern; +#else enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2; struct encoder_feature_support *features; struct dpcd_caps *dpcd_caps = &link->dpcd_caps; @@ -175,7 +344,7 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -196,7 +365,38 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li return DP_TRAINING_PATTERN_SEQUENCE_3; return DP_TRAINING_PATTERN_SEQUENCE_2; +#endif +} + +#if defined(CONFIG_DRM_AMD_DC_DCN) +static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) +{ + uint8_t link_rate = 0; + enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings); + + if (encoding == DP_128b_132b_ENCODING) + switch (link_settings->link_rate) { + case LINK_RATE_UHBR10: + link_rate = 0x1; + break; + case LINK_RATE_UHBR20: + link_rate = 0x2; + break; + case LINK_RATE_UHBR13_5: + link_rate = 0x4; + break; + default: + link_rate = 0; + break; + } + else if (encoding == DP_8b_10b_ENCODING) + link_rate = (uint8_t) link_settings->link_rate; + else + link_rate = 0; + + return link_rate; } +#endif enum dc_status dpcd_set_link_settings( struct dc_link *link, @@ -205,8 +405,8 @@ enum dc_status dpcd_set_link_settings( uint8_t rate; enum dc_status status; - union down_spread_ctrl downspread = { {0} }; - union lane_count_set lane_count_set = { {0} }; + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; downspread.raw = (uint8_t) (lt_settings->link_settings.link_spread); @@ -230,7 +430,7 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && lt_settings->link_settings.use_link_rate_set == true) { rate = 0; /* WA for some MUX chips that will power down with eDP and lose supported @@ -247,7 +447,11 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_LINK_RATE_SET, &lt_settings->link_settings.link_rate_set, 1); } else { +#if defined(CONFIG_DRM_AMD_DC_DCN) + rate = get_dpcd_link_rate(&lt_settings->link_settings); +#else rate = (uint8_t) (lt_settings->link_settings.link_rate); +#endif status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); } @@ -289,6 +493,10 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( disable_scrabled_data_symbols = 1; break;
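/* TPS4 and the 128b/132b training patterns are transmitted scrambled, so the cases below leave scrambling enabled. */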
case DP_TRAINING_PATTERN_SEQUENCE_4: +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_TPS1: + case DP_128b_132b_TPS2: +#endif disable_scrabled_data_symbols = 0; break; default: @@ -311,13 +519,10 @@ static void dpcd_set_lt_pattern_and_lane_settings( enum dc_dp_training_pattern pattern, uint32_t offset) { - union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } }; - uint32_t dpcd_base_lt_offset; uint8_t dpcd_lt_buffer[5] = {0}; - union dpcd_training_pattern dpcd_pattern = { {0} }; - uint32_t lane; + union dpcd_training_pattern dpcd_pattern = { 0 }; uint32_t size_in_bytes; bool edp_workaround = false; /* TODO link_prop.INTERNAL */ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; @@ -350,53 +555,57 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_base_lt_offset, dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } - /***************************************************************** - * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set - *****************************************************************/ - for (lane = 0; lane < - (uint32_t)(lt_settings->link_settings.lane_count); lane++) { - - dpcd_lane[lane].bits.VOLTAGE_SWING_SET = - (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING); - dpcd_lane[lane].bits.PRE_EMPHASIS_SET = - (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS); - - dpcd_lane[lane].bits.MAX_SWING_REACHED = - (lt_settings->lane_settings[lane].VOLTAGE_SWING == - VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); - dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED = - (lt_settings->lane_settings[lane].PRE_EMPHASIS == - PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); - } /* concatenate everything into one buffer*/ - - size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]); + size_in_bytes = lt_settings->link_settings.lane_count * + sizeof(lt_settings->dpcd_lane_settings[0]); // 0x00103 - 0x00102 memmove( &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], - dpcd_lane, + lt_settings->dpcd_lane_settings, size_in_bytes); if (is_repeater(link, offset)) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) +#endif DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, offset, dpcd_base_lt_offset, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) +#endif DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__,
dpcd_base_lt_offset, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is @@ -411,9 +620,18 @@ static void dpcd_set_lt_pattern_and_lane_settings( core_link_write_dpcd( link, DP_TRAINING_LANE0_SET, - (uint8_t *)(dpcd_lane), + (uint8_t *)(lt_settings->dpcd_lane_settings), size_in_bytes); +#if defined(CONFIG_DRM_AMD_DC_DCN) + } else if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) { + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + dpcd_lt_buffer, + sizeof(dpcd_lt_buffer)); +#endif } else /* write it all in (1 + number-of-lanes)-byte burst*/ core_link_write_dpcd( link, dpcd_base_lt_offset, dpcd_lt_buffer, size_in_bytes + sizeof(dpcd_pattern.raw)); - - link->cur_lane_setting = lt_settings->lane_settings[0]; } bool dp_is_cr_done(enum dc_lane_count ln_count, @@ -464,27 +680,75 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status) return align_status.bits.INTERLANE_ALIGN_DONE == 1; } -void dp_update_drive_settings( - struct link_training_settings *dest, - struct link_training_settings src) +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ + uint8_t lane = 0; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) { + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = + (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); + dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = + (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); + dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = + (hw_lane_settings[lane].VOLTAGE_SWING == + VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); + dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = + (hw_lane_settings[lane].PRE_EMPHASIS == + PRE_EMPHASIS_MAX_LEVEL ?
1 : 0); + } +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) { + dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = + hw_lane_settings[lane].FFE_PRESET.settings.level; + } +#endif + } +} + +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) { uint32_t lane; - for (lane = 0; lane < src.link_settings.lane_count; lane++) { - if (dest->voltage_swing == NULL) - dest->lane_settings[lane].VOLTAGE_SWING = src.lane_settings[lane].VOLTAGE_SWING; - else - dest->lane_settings[lane].VOLTAGE_SWING = *dest->voltage_swing; - if (dest->pre_emphasis == NULL) - dest->lane_settings[lane].PRE_EMPHASIS = src.lane_settings[lane].PRE_EMPHASIS; - else - dest->lane_settings[lane].PRE_EMPHASIS = *dest->pre_emphasis; + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) { + hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing)(ln_adjust[lane].bits. + VOLTAGE_SWING_LANE); + hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis)(ln_adjust[lane].bits. + PRE_EMPHASIS_LANE); + } +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) { + hw_lane_settings[lane].FFE_PRESET.raw = + ln_adjust[lane].tx_ffe.PRESET_VALUE; + } +#endif + } + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); - if (dest->post_cursor2 == NULL) - dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2; - else - dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2; + if (lt_settings->disallow_per_lane_settings) { + /* we find the maximum of the requested settings across all lanes*/ + /* and set this maximum for all lanes*/ + maximize_lane_settings(lt_settings, hw_lane_settings); + override_lane_settings(lt_settings, hw_lane_settings); + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); } + } static uint8_t get_nibble_at_index(const uint8_t *buf, @@ -514,46 +778,31 @@ static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( } -static void find_max_drive_settings( - const struct link_training_settings *link_training_setting, - struct link_training_settings *max_lt_setting) +static void maximize_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) { uint32_t lane; struct dc_lane_settings max_requested; - max_requested.VOLTAGE_SWING = - link_training_setting-> - lane_settings[0].VOLTAGE_SWING; - max_requested.PRE_EMPHASIS = - link_training_setting-> - lane_settings[0].PRE_EMPHASIS; - /*max_requested.postCursor2 = - * link_training_setting->laneSettings[0].postCursor2;*/ + max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; + max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; +#if defined(CONFIG_DRM_AMD_DC_DCN) + max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; +#endif /* Determine what the maximum of the requested settings are*/ - for (lane = 1; lane < link_training_setting->link_settings.lane_count; lane++) { - if (link_training_setting->lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) - - max_requested.VOLTAGE_SWING =
- link_training_setting-> - lane_settings[lane].VOLTAGE_SWING; - - if (link_training_setting->lane_settings[lane].PRE_EMPHASIS > - max_requested.PRE_EMPHASIS) - max_requested.PRE_EMPHASIS = - link_training_setting-> - lane_settings[lane].PRE_EMPHASIS; - - /* - if (link_training_setting->laneSettings[lane].postCursor2 > - max_requested.postCursor2) - { - max_requested.postCursor2 = - link_training_setting->laneSettings[lane].postCursor2; - } - */ + for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { + if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) + max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; + + if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) + max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (lane_settings[lane].FFE_PRESET.settings.level > + max_requested.FFE_PRESET.settings.level) + max_requested.FFE_PRESET.settings.level = + lane_settings[lane].FFE_PRESET.settings.level; +#endif } /* make sure the requested settings are @@ -563,10 +812,10 @@ static void find_max_drive_settings( if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; - /* - if (max_requested.postCursor2 > PostCursor2_MaxLevel) - max_requested.postCursor2 = PostCursor2_MaxLevel; - */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) + max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; +#endif /* make sure the pre-emphasis matches the voltage swing*/ if (max_requested.PRE_EMPHASIS > @@ -576,57 +825,58 @@ static void find_max_drive_settings( get_max_pre_emphasis_for_voltage_swing( max_requested.VOLTAGE_SWING); - /* - * Post Cursor2 levels are completely independent from - * pre-emphasis (Post Cursor1) levels. 
But Post Cursor2 levels - * can only be applied to each allowable combination of voltage - * swing and pre-emphasis levels */ - /* if ( max_requested.postCursor2 > - * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing)) - * max_requested.postCursor2 = - * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing); - */ - - max_lt_setting->link_settings.link_rate = - link_training_setting->link_settings.link_rate; - max_lt_setting->link_settings.lane_count = - link_training_setting->link_settings.lane_count; - max_lt_setting->link_settings.link_spread = - link_training_setting->link_settings.link_spread; - - for (lane = 0; lane < - link_training_setting->link_settings.lane_count; - lane++) { - max_lt_setting->lane_settings[lane].VOLTAGE_SWING = - max_requested.VOLTAGE_SWING; - max_lt_setting->lane_settings[lane].PRE_EMPHASIS = - max_requested.PRE_EMPHASIS; - /*max_lt_setting->laneSettings[lane].postCursor2 = - * max_requested.postCursor2; - */ + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; + lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; +#if defined(CONFIG_DRM_AMD_DC_DCN) + lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; +#endif } +} + +static void override_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + + if (lt_settings->voltage_swing == NULL && + lt_settings->pre_emphasis == NULL && +#if defined(CONFIG_DRM_AMD_DC_DCN) + lt_settings->ffe_preset == NULL && +#endif + lt_settings->post_cursor2 == NULL) + + return; + for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) { + if (lt_settings->voltage_swing) + lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; + if (lt_settings->pre_emphasis) + lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; + if (lt_settings->post_cursor2) + lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (lt_settings->ffe_preset) + lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; +#endif + } } -enum dc_status dp_get_lane_status_and_drive_settings( +enum dc_status dp_get_lane_status_and_lane_adjust( struct dc_link *link, const struct link_training_settings *link_training_setting, - union lane_status *ln_status, - union lane_align_status_updated *ln_status_updated, - struct link_training_settings *req_settings, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], uint32_t offset) { unsigned int lane01_status_address = DP_LANE0_1_STATUS; uint8_t lane_adjust_offset = 4; unsigned int lane01_adjust_address; uint8_t dpcd_buf[6] = {0}; - union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; - struct link_training_settings request_settings = { {0} }; uint32_t lane; enum dc_status status; - memset(req_settings, '\0', sizeof(struct link_training_settings)); - if (is_repeater(link, offset)) { lane01_status_address = DP_LANE0_1_STATUS_PHY_REPEATER1 + @@ -646,11 +896,11 @@ enum dc_status dp_get_lane_status_and_drive_settings( ln_status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); - dpcd_lane_adjust[lane].raw = + ln_adjust[lane].raw = get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); } - ln_status_updated->raw = dpcd_buf[2]; + ln_align->raw = dpcd_buf[2]; if (is_repeater(link, offset)) { DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" @@ -689,39 +939,6 @@ enum 
dc_status dp_get_lane_status_and_drive_settings( dpcd_buf[lane_adjust_offset + 1]); } - /*copy to req_settings*/ - request_settings.link_settings.lane_count = - link_training_setting->link_settings.lane_count; - request_settings.link_settings.link_rate = - link_training_setting->link_settings.link_rate; - request_settings.link_settings.link_spread = - link_training_setting->link_settings.link_spread; - - for (lane = 0; lane < - (uint32_t)(link_training_setting->link_settings.lane_count); - lane++) { - - request_settings.lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits. - VOLTAGE_SWING_LANE); - request_settings.lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits. - PRE_EMPHASIS_LANE); - } - - /*Note: for postcursor2, read adjusted - * postcursor2 settings from*/ - /*DpcdAddress_AdjustRequestPostCursor2 = - *0x020C (not implemented yet)*/ - - /* we find the maximum of the requested settings across all lanes*/ - /* and set this maximum for all lanes*/ - find_max_drive_settings(&request_settings, req_settings); - - /* if post cursor 2 is needed in the future, - * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C - */ - return status; } @@ -730,8 +947,6 @@ enum dc_status dpcd_set_lane_settings( const struct link_training_settings *link_training_setting, uint32_t offset) { - union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}}; - uint32_t lane; unsigned int lane0_set_address; enum dc_status status; @@ -741,71 +956,53 @@ enum dc_status dpcd_set_lane_settings( lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); - for (lane = 0; lane < - (uint32_t)(link_training_setting-> - link_settings.lane_count); - lane++) { - dpcd_lane[lane].bits.VOLTAGE_SWING_SET = - (uint8_t)(link_training_setting-> - lane_settings[lane].VOLTAGE_SWING); - dpcd_lane[lane].bits.PRE_EMPHASIS_SET = - (uint8_t)(link_training_setting-> - lane_settings[lane].PRE_EMPHASIS); - dpcd_lane[lane].bits.MAX_SWING_REACHED = - (link_training_setting-> - lane_settings[lane].VOLTAGE_SWING == - VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); - dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED = - (link_training_setting-> - lane_settings[lane].PRE_EMPHASIS == - PRE_EMPHASIS_MAX_LEVEL ? 
1 : 0); - } - status = core_link_write_dpcd(link, lane0_set_address, - (uint8_t *)(dpcd_lane), + (uint8_t *)(link_training_setting->dpcd_lane_settings), link_training_setting->link_settings.lane_count); - /* - if (LTSettings.link.rate == LinkRate_High2) - { - DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0}; - for ( uint32_t lane = 0; - lane < lane_count_DPMax; lane++) - { - dpcd_lane2[lane].bits.post_cursor2_set = - static_cast<unsigned char>( - LTSettings.laneSettings[lane].postCursor2); - dpcd_lane2[lane].bits.max_post_cursor2_reached = 0; - } - m_pDpcdAccessSrv->WriteDpcdData( - DpcdAddress_Lane0Set2, - reinterpret_cast<unsigned char*>(dpcd_lane2), - LTSettings.link.lanes); - } - */ - if (is_repeater(link, offset)) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + offset, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_8b_10b_ENCODING) +#endif DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, offset, lane0_set_address, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == + DP_8b_10b_ENCODING) +#endif DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, lane0_set_address, - dpcd_lane[0].bits.VOLTAGE_SWING_SET, - dpcd_lane[0].bits.PRE_EMPHASIS_SET, - dpcd_lane[0].bits.MAX_SWING_REACHED, - dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED); + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } - link->cur_lane_setting = link_training_setting->lane_settings[0]; return status; } @@ -817,7 +1014,7 @@ bool dp_is_max_vs_reached( for (lane = 0; lane < (uint32_t)(lt_settings->link_settings.lane_count); lane++) { - if (lt_settings->lane_settings[lane].VOLTAGE_SWING + if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET == VOLTAGE_SWING_MAX_LEVEL) return true; } @@ -847,17 +1044,17 @@ static bool perform_post_lt_adj_req_sequence( adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; adj_req_timer++) { - struct link_training_settings req_settings; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = 
{ { {0} } }; - dp_get_lane_status_and_drive_settings( + dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, DPRX); if (dpcd_lane_status_updated.bits. @@ -875,11 +1072,10 @@ static bool perform_post_lt_adj_req_sequence( for (lane = 0; lane < (uint32_t)(lane_count); lane++) { if (lt_settings-> - lane_settings[lane].VOLTAGE_SWING != - req_settings.lane_settings[lane]. - VOLTAGE_SWING || - lt_settings->lane_settings[lane].PRE_EMPHASIS != - req_settings.lane_settings[lane].PRE_EMPHASIS) { + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { req_drv_setting_changed = true; break; @@ -887,8 +1083,8 @@ } if (req_drv_setting_changed) { - dp_update_drive_settings( - lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); dc_link_dp_set_drive_settings(link, lt_settings); @@ -932,6 +1128,14 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval case 0x04: aux_rd_interval_us = 16000; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case 0x05: + aux_rd_interval_us = 32000; + break; + case 0x06: + aux_rd_interval_us = 64000; + break; +#endif default: break; } @@ -960,20 +1164,24 @@ static enum link_training_result perform_channel_equalization_sequence( struct dc_link *link, struct link_training_settings *lt_settings, uint32_t offset) { - struct link_training_settings req_settings; enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; uint32_t wait_time_microsec; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; - union lane_align_status_updated dpcd_lane_status_updated = { {0} }; - union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; /* Note: also check that TPS4 is a supported feature*/ - tr_pattern = lt_settings->pattern_for_eq; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) + tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; +#else if (is_repeater(link, offset)) tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; +#endif dp_set_hw_training_pattern(link, tr_pattern, offset); @@ -1010,12 +1218,12 @@ static enum link_training_result perform_channel_equalization_sequence( /* 4. Read lane status and requested * drive settings as set by the sink*/ - dp_get_lane_status_and_drive_settings( + dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, offset); /* 5. check CR done*/ @@ -1029,7 +1237,8 @@ static enum link_training_result perform_channel_equalization_sequence( return LINK_TRAINING_SUCCESS; /* 7.
update VS/PE/PC2 in lt_settings*/ - dp_update_drive_settings(lt_settings, req_settings); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } return LINK_TRAINING_EQ_FAIL_EQ; @@ -1055,10 +1264,10 @@ static enum link_training_result perform_clock_recovery_sequence( uint32_t retries_cr; uint32_t retry_count; uint32_t wait_time_microsec; - struct link_training_settings req_settings; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; retries_cr = 0; retry_count = 0; @@ -1112,12 +1321,12 @@ static enum link_training_result perform_clock_recovery_sequence( /* 4. Read lane status and requested drive * settings as set by the sink */ - dp_get_lane_status_and_drive_settings( + dp_get_lane_status_and_lane_adjust( link, lt_settings, dpcd_lane_status, &dpcd_lane_status_updated, - &req_settings, + dpcd_lane_adjust, offset); /* 5. check CR done*/ @@ -1125,23 +1334,35 @@ return LINK_TRAINING_SUCCESS; /* 6. max VS reached*/ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if ((dp_get_link_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) && + dp_is_max_vs_reached(lt_settings)) + break; +#else if (dp_is_max_vs_reached(lt_settings)) break; +#endif /* 7. same lane settings*/ /* Note: settings are the same for all lanes, * so comparing first lane is sufficient*/ - if ((lt_settings->lane_settings[0].VOLTAGE_SWING == - req_settings.lane_settings[0].VOLTAGE_SWING) - && (lt_settings->lane_settings[0].PRE_EMPHASIS == - req_settings.lane_settings[0].PRE_EMPHASIS)) + if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) && + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) && + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == + dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) retries_cr++; +#endif else retries_cr = 0; /* 8. update VS/PE/PC2 in lt_settings*/ - dp_update_drive_settings(lt_settings, req_settings); - + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); retry_count++; } @@ -1161,7 +1382,7 @@ static inline enum link_training_result dp_transition_to_video_idle( struct link_training_settings *lt_settings, enum link_training_result status) { - union lane_count_set lane_count_set = { {0} }; + union lane_count_set lane_count_set = {0}; /* 4. mainlink output idle pattern*/ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); @@ -1172,7 +1393,11 @@ * TPS4 must be used instead of POST_LT_ADJ_REQ. */ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || +#if defined(CONFIG_DRM_AMD_DC_DCN) + lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { +#else lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) { +#endif /* delay 5ms after Main Link output idle pattern and then check * DPCD 0202h.
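* (DPCD address 0202h is the LANE0_1_STATUS register, DP_LANE0_1_STATUS.)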
*/ @@ -1266,8 +1491,40 @@ static inline void decide_8b_10b_training_settings( lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); lt_settings->enhanced_framing = 1; lt_settings->should_set_fec_ready = true; + lt_settings->disallow_per_lane_settings = true; + lt_settings->always_match_dpcd_with_hw_lane_settings = true; + dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static inline void decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, 0, sizeof(*lt_settings)); + + lt_settings->link_settings = *link_settings; + /* TODO: should decide link spread when populating link_settings */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : + LINK_SPREAD_05_DOWNSPREAD_30KHZ; + + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); + lt_settings->eq_pattern_time = 2500; + lt_settings->eq_wait_time_limit = 400000; + lt_settings->eq_loop_count_limit = 20; + lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; + lt_settings->cds_pattern_time = 2500; + lt_settings->cds_wait_time_limit = (dp_convert_to_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; + lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ? + LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT; + lt_settings->disallow_per_lane_settings = true; + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} +#endif + void dp_decide_training_settings( struct dc_link *link, const struct dc_link_settings *link_settings, @@ -1275,6 +1532,10 @@ void dp_decide_training_settings( { if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) decide_8b_10b_training_settings(link, link_settings, lt_settings); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) + decide_128b_132b_training_settings(link, link_settings, lt_settings); +#endif } static void override_training_settings( @@ -1284,12 +1545,6 @@ static void override_training_settings( { uint32_t lane; - /* Override link settings */ - if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) - lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate; - if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN) - lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count; - /* Override link spread */ if (!link->dp_ss_off && overrides->downspread != NULL) lt_settings->link_settings.link_spread = *overrides->downspread ? 
@@ -1303,6 +1558,17 @@ static void override_training_settings( lt_settings->pre_emphasis = overrides->pre_emphasis; if (overrides->post_cursor2 != NULL) lt_settings->post_cursor2 = overrides->post_cursor2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (overrides->ffe_preset != NULL) + lt_settings->ffe_preset = overrides->ffe_preset; +#endif + /* Override HW lane settings with BIOS forced values if present */ + if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && + link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; + lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; + lt_settings->always_match_dpcd_with_hw_lane_settings = false; + } for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lt_settings->lane_settings[lane].VOLTAGE_SWING = lt_settings->voltage_swing != NULL ? @@ -1318,6 +1584,9 @@ static void override_training_settings( : POST_CURSOR2_DISABLED; } + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + /* Initialize training timings */ if (overrides->cr_pattern_time != NULL) lt_settings->cr_pattern_time = *overrides->cr_pattern_time; @@ -1362,7 +1631,7 @@ uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count) return 0; // invalid value } -enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) +static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) { uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; @@ -1373,7 +1642,7 @@ enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) sizeof(repeater_mode)); } -enum dc_status configure_lttpr_mode_non_transparent( +static enum dc_status configure_lttpr_mode_non_transparent( struct dc_link *link, const struct link_training_settings *lt_settings) { @@ -1416,6 +1685,13 @@ enum dc_status configure_lttpr_mode_non_transparent( if (encoding == DP_8b_10b_ENCODING) { repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + /* Driver does not need to train the first hop. Skip DPCD read and clear + * AUX_RD_INTERVAL for DPTX-to-DPIA hop. 
+ */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); @@ -1434,7 +1710,7 @@ enum dc_status configure_lttpr_mode_non_transparent( static void repeater_training_done(struct dc_link *link, uint32_t offset) { - union dpcd_training_pattern dpcd_pattern = { {0} }; + union dpcd_training_pattern dpcd_pattern = {0}; const uint32_t dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + @@ -1489,6 +1765,17 @@ static void print_status_message( case LINK_RATE_HIGH3: link_rate = "HBR3"; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_UHBR10: + link_rate = "UHBR10"; + break; + case LINK_RATE_UHBR13_5: + link_rate = "UHBR13.5"; + break; + case LINK_RATE_UHBR20: + link_rate = "UHBR20"; + break; +#endif default: break; } @@ -1518,6 +1805,20 @@ static void print_status_message( case LINK_TRAINING_LINK_LOSS: lt_result = "Link loss"; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_LT_FAILED: + lt_result = "LT_FAILED received"; + break; + case DP_128b_132b_MAX_LOOP_COUNT_REACHED: + lt_result = "max loop count reached"; + break; + case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: + lt_result = "channel EQ timeout"; + break; + case DP_128b_132b_CDS_DONE_TIMEOUT: + lt_result = "CDS timeout"; + break; +#endif default: break; } @@ -1537,6 +1838,9 @@ static void print_status_message( } /* Connectivity log: link training */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ +#endif CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", link_rate, lt_settings->link_settings.lane_count, @@ -1553,6 +1857,9 @@ void dc_link_dp_set_drive_settings( /* program ASIC PHY settings*/ dp_set_hw_lane_settings(link, lt_settings, DPRX); + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + /* Notify DP sink the PHY settings from source */ dpcd_set_lane_settings(link, lt_settings, DPRX); } @@ -1619,9 +1926,23 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train static void dpcd_exit_training_mode(struct dc_link *link) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t sink_status = 0; + uint8_t i; +#endif /* clear training pattern set */ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* poll for intra-hop disable */ + for (i = 0; i < 10; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; + udelay(1000); + } +#endif } enum dc_status dpcd_configure_channel_coding(struct dc_link *link, @@ -1645,6 +1966,137 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link, return status; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, + uint32_t *interval_in_us) +{ + union dp_128b_132b_training_aux_rd_interval dpcd_interval; + uint32_t interval_unit = 0; + + dpcd_interval.raw = 0; + core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL, + &dpcd_interval.raw, sizeof(dpcd_interval.raw)); + interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ + /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * + * INTERVAL_UNIT. 
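+ * For example, the largest VALUE (0x7F) with the 2 ms unit gives 128 * 2 ms = 256 ms.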
The maximum is 256 ms + */ + *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; +} + +static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + uint8_t loop_count; + uint32_t aux_rd_interval = 0; + uint32_t wait_time = 0; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + enum link_training_result status = LINK_TRAINING_SUCCESS; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Transmit 128b/132b_TPS1 over Main-Link */ + dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX); + /* Set TRAINING_PATTERN_SET to 01h */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); + + /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dp_set_hw_lane_settings(link, lt_settings, DPRX); + dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX); + + /* Set loop counter to start from 1 */ + loop_count = 1; + + /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ + dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, + lt_settings->pattern_for_eq, DPRX); + + /* poll for channel EQ done */ + while (status == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); + wait_time += aux_rd_interval; + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, + dpcd_lane_status)) { + /* pass */ + break; + } else if (loop_count >= lt_settings->eq_loop_count_limit) { + status = DP_128b_132b_MAX_LOOP_COUNT_REACHED; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + status = DP_128b_132b_LT_FAILED; + } else { + dp_set_hw_lane_settings(link, lt_settings, DPRX); + dpcd_set_lane_settings(link, lt_settings, DPRX); + } + loop_count++; + } + + /* poll for EQ interlane align done */ + while (status == LINK_TRAINING_SUCCESS) { + if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (wait_time >= lt_settings->eq_wait_time_limit) { + status = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + status = DP_128b_132b_LT_FAILED; + } else { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->eq_pattern_time); + wait_time += lt_settings->eq_pattern_time; + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + } + } + + return status; +} + +static enum link_training_result dp_perform_128b_132b_cds_done_sequence( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + /* Assumption: assume hardware has transmitted eq pattern */ + enum link_training_result status = LINK_TRAINING_SUCCESS; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + 
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; + uint32_t wait_time = 0; + + /* initiate CDS done sequence */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); + + /* poll for CDS interlane align done and symbol lock */ + while (status == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->cds_pattern_time); + wait_time += lt_settings->cds_pattern_time; + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && + dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + status = DP_128b_132b_LT_FAILED; + } else if (wait_time >= lt_settings->cds_wait_time_limit) { + status = DP_128b_132b_CDS_DONE_TIMEOUT; + } + } + + return status; +} +#endif + static enum link_training_result dp_perform_8b_10b_link_training( struct dc_link *link, struct link_training_settings *lt_settings) @@ -1686,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( } for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) - lt_settings->lane_settings[lane].VOLTAGE_SWING = VOLTAGE_SWING_LEVEL0; + lt_settings->dpcd_lane_settings[lane].raw = 0; } if (status == LINK_TRAINING_SUCCESS) { @@ -1701,6 +2153,35 @@ return status; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static enum link_training_result dp_perform_128b_132b_link_training( + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + + /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ + if (link->dc->debug.legacy_dp2_lt) { + struct link_training_settings legacy_settings; + + decide_8b_10b_training_settings(link, + &lt_settings->link_settings, + &legacy_settings); + return dp_perform_8b_10b_link_training(link, &legacy_settings); + } + + dpcd_set_link_settings(link, lt_settings); + + if (result == LINK_TRAINING_SUCCESS) + result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings); + + if (result == LINK_TRAINING_SUCCESS) + result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings); + + return result; +} +#endif + enum link_training_result dc_link_dp_perform_link_training( struct dc_link *link, const struct dc_link_settings *link_settings, @@ -1735,6 +2216,10 @@ enum link_training_result dc_link_dp_perform_link_training( */ if (encoding == DP_8b_10b_ENCODING) status = dp_perform_8b_10b_link_training(link, &lt_settings); +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (encoding == DP_128b_132b_ENCODING) + status = dp_perform_128b_132b_link_training(link, &lt_settings); +#endif else ASSERT(0); @@ -1772,16 +2257,19 @@ bool perform_link_training_with_retries( /* Dynamically assigned link encoders associated with stream rather than * link.
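* (The encoder in use is therefore looked up per stream through link_enc_cfg below.)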
*/ - if (link->dc->res_pool->funcs->link_encs_assign) - link_enc = stream->link_enc; + if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream); else link_enc = link->link_enc; /* We need to do this before the link training to ensure the idle pattern in SST * mode will be sent right after the link training */ - link_enc->funcs->connect_dig_be_to_fe(link_enc, + if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING) { + link_enc->funcs->connect_dig_be_to_fe(link_enc, pipe_ctx->stream_res.stream_enc->id, true); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); + } for (j = 0; j < attempts; ++j) { @@ -1804,14 +2292,13 @@ bool perform_link_training_with_retries( if (panel_mode == DP_PANEL_MODE_EDP) { struct cp_psp *cp_psp = &stream->ctx->cp_psp; - if (cp_psp && cp_psp->funcs.enable_assr) { - if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) { - /* since eDP implies ASSR on, change panel - * mode to disable ASSR - */ - panel_mode = DP_PANEL_MODE_DEFAULT; - } - } + if (cp_psp && cp_psp->funcs.enable_assr) + /* ASSR is bound to fail with unsigned PSP + * verstage used during development phase. + * Report and continue with eDP panel mode to + * perform eDP link training with the right settings + */ + cp_psp->funcs.enable_assr(cp_psp->handle, link); } #endif @@ -1821,10 +2308,22 @@ dc_link_dp_perform_link_training_skip_aux(link, &current_setting); return true; } else { + /** @todo Consolidate USB4 DP and DPx.x training. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + status = dc_link_dpia_perform_link_training(link, + &current_setting, + skip_video_pattern); + + /* Transmit idle pattern once training successful. */ + if (status == LINK_TRAINING_SUCCESS) + dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, + NULL, 0); + } else { + status = dc_link_dp_perform_link_training(link, + &current_setting, + skip_video_pattern); + } + if (status == LINK_TRAINING_SUCCESS) return true; } @@ -1840,15 +2339,23 @@ dp_disable_link_phy(link, signal); /* Abort link training if failure due to sink being unplugged. */ - if (status == LINK_TRAINING_ABORT) - break; - else if (do_fallback) { + if (status == LINK_TRAINING_ABORT) { + enum dc_connection_type type = dc_connection_none; + + dc_link_detect_sink(link, &type); + if (type == dc_connection_none) + break; + } else if (do_fallback) { + uint32_t req_bw; + uint32_t link_bw; + decide_fallback_link_setting(*link_setting, &current_setting, status); /* Fail link training if reduced link bandwidth no longer meets * stream requirements.
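* (Checked below by comparing req_bw, the stream's required bandwidth, against link_bw, the fallback link's bandwidth.)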
*/ - if (dc_bandwidth_in_kbps_from_timing(&stream->timing) < - dc_link_bandwidth_kbps(link, &current_setting)) + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); + link_bw = dc_link_bandwidth_kbps(link, &current_setting); + if (req_bw > link_bw) break; } @@ -1950,8 +2457,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_cs_id, link_settings); /* Set FEC enable */ - fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; - dp_set_fec_ready(link, fec_enable); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { +#endif + fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; + dp_set_fec_ready(link, fec_enable); +#if defined(CONFIG_DRM_AMD_DC_DCN) + } +#endif if (lt_overrides->alternate_scrambler_reset) { if (*lt_overrides->alternate_scrambler_reset) @@ -1993,23 +2506,59 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) * Still shouldn't turn off dp_receiver (DPCD:600h) */ if (link_down == true) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_link_settings link_settings = link->cur_link_settings; +#endif dp_disable_link_phy(link, link->connector_signal); - dp_set_fec_ready(link, false); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) +#endif + dp_set_fec_ready(link, false); } link->sync_lt_in_progress = false; return true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) +{ + enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + + if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) + lttpr_max_link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) + lttpr_max_link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10) + lttpr_max_link_rate = LINK_RATE_UHBR10; + + return lttpr_max_link_rate; +} +#endif + bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) { + struct link_encoder *link_enc = NULL; + if (!max_link_enc_cap) { DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); return false; } - if (link->link_enc->funcs->get_max_link_cap) { - link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap); + /* Links supporting dynamically assigned link encoder will be assigned next + * available encoder if one not already assigned.
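+ * (link_enc may still be NULL afterwards if no encoder is currently available, hence the guarded call below.)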
+ */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); + } else + link_enc = link->link_enc; + ASSERT(link_enc); /* get max link encoder capability */ - link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); + if (link_enc) + link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (max_link_cap.link_rate >= LINK_RATE_UHBR10 && + !link->hpo_dp_link_enc) + max_link_cap.link_rate = LINK_RATE_HIGH3; +#endif /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) @@ -2045,8 +2616,15 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; +#if defined(CONFIG_DRM_AMD_DC_DCN) + lttpr_max_link_rate = get_lttpr_max_link_rate(link); + + if (lttpr_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = lttpr_max_link_rate; +#else if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; +#endif DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", __func__, @@ -2056,7 +2634,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) return max_link_cap; } -enum dc_status read_hpd_rx_irq_data( +static enum dc_status read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) { @@ -2187,17 +2765,32 @@ bool dp_verify_link_cap( enum link_training_result status; union hpd_irq_data irq_data; - if (link->dc->debug.skip_detection_link_training) { + /* link training starts with the maximum common settings + * supported by both sink and ASIC. + */ + max_link_cap = get_max_link_cap(link); + initial_link_settings = get_common_supported_link_settings( + *known_limit_link_setting, + max_link_cap); + + /* Accept reported capabilities if link supports flexible encoder mapping or encoder already in use. */ + if (link->dc->debug.skip_detection_link_training || + link->is_dig_mapping_flexible) { + /* TODO - should we check link encoder's max link caps here? + * How do we know which link encoder to check from? 
+ */ link->verified_link_cap = *known_limit_link_setting; return true; + } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign && + !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) { + link->verified_link_cap = initial_link_settings; + return true; } memset(&irq_data, 0, sizeof(irq_data)); success = false; skip_link_training = false; - max_link_cap = get_max_link_cap(link); - /* Grant extended timeout request */ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) { uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; @@ -2205,6 +2798,10 @@ bool dp_verify_link_cap( core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) + reset_dp_hpo_stream_encoders_for_link(link); +#endif /* TODO implement override and monitor patch later */ /* try to train the link from high to low to @@ -2215,19 +2812,13 @@ bool dp_verify_link_cap( dp_cs_id = get_clock_source_id(link); - /* link training starts with the maximum common settings - * supported by both sink and ASIC. - */ - initial_link_settings = get_common_supported_link_settings( - *known_limit_link_setting, - max_link_cap); cur_link_setting = initial_link_settings; /* Temporary Renoir-specific workaround for SWDEV-215184; * PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle, * so add extra cycle of enabling and disabling the PHY before first link training. */ - if (link->link_enc->features.flags.bits.DP_IS_USB_C && + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && link->dc->debug.usbc_combo_phy_reset_wa) { dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur); dp_disable_link_phy(link, link->connector_signal); @@ -2314,7 +2905,7 @@ bool dp_verify_link_cap_with_retries( link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED; break; } else if (dp_verify_link_cap(link, - &link->reported_link_cap, + known_limit_link_setting, &fail_count) && fail_count == 0) { success = true; break; @@ -2329,11 +2920,21 @@ bool dp_verify_mst_link_cap( { struct dc_link_settings max_link_cap = {0}; - max_link_cap = get_max_link_cap(link); - link->verified_link_cap = get_common_supported_link_settings( - link->reported_link_cap, - max_link_cap); - + if (dp_get_link_encoding_format(&link->reported_link_cap) == + DP_8b_10b_ENCODING) { + max_link_cap = get_max_link_cap(link); + link->verified_link_cap = get_common_supported_link_settings( + link->reported_link_cap, + max_link_cap); + } +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (dp_get_link_encoding_format(&link->reported_link_cap) == + DP_128b_132b_ENCODING) { + dp_verify_link_cap_with_retries(link, + &link->reported_link_cap, + LINK_TRAINING_MAX_VERIFY_RETRY); + } +#endif return true; } @@ -2360,7 +2961,17 @@ static struct dc_link_settings get_common_supported_link_settings( * We map it to the maximum supported link rate that * is smaller than MAX_LINK_BW in this case. 
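 * For example, a reported rate above UHBR20 is clamped to LINK_RATE_UHBR20, and one that falls between HIGH3 (8.1 Gb/s per lane) and UHBR10 is clamped down to LINK_RATE_HIGH3, mirroring the ladder below.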
*/ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link_settings.link_rate > LINK_RATE_UHBR20) { + link_settings.link_rate = LINK_RATE_UHBR20; + } else if (link_settings.link_rate < LINK_RATE_UHBR20 && + link_settings.link_rate > LINK_RATE_UHBR13_5) { + link_settings.link_rate = LINK_RATE_UHBR13_5; + } else if (link_settings.link_rate < LINK_RATE_UHBR10 && + link_settings.link_rate > LINK_RATE_HIGH3) { +#else if (link_settings.link_rate > LINK_RATE_HIGH3) { +#endif link_settings.link_rate = LINK_RATE_HIGH3; } else if (link_settings.link_rate < LINK_RATE_HIGH3 && link_settings.link_rate > LINK_RATE_HIGH2) { @@ -2405,6 +3016,14 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) { switch (link_rate) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_UHBR20: + return LINK_RATE_UHBR13_5; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + return LINK_RATE_HIGH3; +#endif case LINK_RATE_HIGH3: return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: @@ -2439,11 +3058,55 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate) return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: return LINK_RATE_HIGH3; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case LINK_RATE_HIGH3: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + return LINK_RATE_UHBR13_5; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR20; +#endif default: return LINK_RATE_UNKNOWN; } } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static bool decide_fallback_link_setting_max_bw_policy( + const struct dc_link_settings *max, + struct dc_link_settings *cur) +{ + uint8_t cur_idx = 0, next_idx; + bool found = false; + + while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find current index */ + if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && + dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) + break; + else + cur_idx++; + + next_idx = cur_idx + 1; + + while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find next index */ + if (dp_lt_fallbacks[next_idx].lane_count <= max->lane_count && + dp_lt_fallbacks[next_idx].link_rate <= max->link_rate) + break; + else + next_idx++; + + if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { + cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; + cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; + found = true; + } + + return found; +} +#endif + /* * function: set link rate and lane count fallback based * on current link setting and last link training result @@ -2459,6 +3122,11 @@ static bool decide_fallback_link_setting( { if (!current_link_setting) return false; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING) + return decide_fallback_link_setting_max_bw_policy(&initial_link_settings, + current_link_setting); +#endif switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -2678,6 +3346,148 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin return false; } +static bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + unsigned int policy = 0; + + policy = link->ctx->dc->debug.force_dsc_edp_policy; + if (max_link_rate == LINK_RATE_UNKNOWN) + max_link_rate = link->verified_link_cap.link_rate; + /* + * edp_supported_link_rates_count is only valid for 
eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || + link->dpcd_caps.edp_supported_link_rates_count == 0)) { + /* for DSC enabled case, we search for minimum lane count */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = LINK_RATE_LOW; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = false; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate = + increase_link_rate( + current_link_setting.link_rate); + } else { + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate = initial_link_setting.link_rate; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate( + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + } + return false; + } + + /* if optimized eDP link rates are supported */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2.
could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate_set < + link->dpcd_caps.edp_supported_link_rates_count + && current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else { + if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate_set = initial_link_setting.link_rate_set; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + current_link_setting.lane_count = + initial_link_setting.lane_count; + } else + break; + } + } + } + return false; +} + static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) { *link_setting = link->verified_link_cap; @@ -2712,7 +3522,25 @@ void decide_link_settings(struct dc_stream_state *stream, if (decide_mst_link_settings(link, link_setting)) return; } else if (link->connector_signal == SIGNAL_TYPE_EDP) { - if (decide_edp_link_settings(link, link_setting, req_bw)) + /* enable edp link optimization for DSC eDP case */ + if (stream->timing.flags.DSC) { + enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; + + if (link->ctx->dc->debug.force_dsc_edp_policy) { + /* calculate link max link rate cap */ + struct dc_link_settings tmp_link_setting; + struct dc_crtc_timing tmp_timing = stream->timing; + uint32_t orig_req_bw; + + tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; + tmp_timing.flags.DSC = 0; + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); + decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw); + max_link_rate = tmp_link_setting.link_rate; + } + if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate)) + return; + } else if (decide_edp_link_settings(link, link_setting, req_bw)) return; } else if (decide_dp_link_settings(link, link_setting, req_bw)) return; @@ -2724,7 +3552,7 @@ void decide_link_settings(struct dc_stream_state *stream, } /*************************Short Pulse IRQ***************************/ -static bool allow_hpd_rx_irq(const struct dc_link *link) +bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link) { /* * Don't handle RX IRQ unless one of following is met: @@ -2774,6 +3602,8 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link) if (psr_error_status.bits.LINK_CRC_ERROR || psr_error_status.bits.RFB_STORAGE_ERROR || psr_error_status.bits.VSC_SDP_ERROR) { + bool allow_active; + /* Acknowledge and clear error bits */ dm_helpers_dp_write_dpcd( link->ctx, @@ -2783,8 +3613,10 @@ static bool handle_hpd_irq_psr_sink(struct
dc_link *link) sizeof(psr_error_status.raw)); /* PSR error, disable and re-enable PSR */ - dc_link_set_psr_allow_active(link, false, true, false); - dc_link_set_psr_allow_active(link, true, true, false); + allow_active = false; + dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); + allow_active = true; + dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); return true; } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -2831,20 +3663,24 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) union phy_test_pattern dpcd_test_pattern; union lane_adjust dpcd_lane_adjustment[2]; unsigned char dpcd_post_cursor_2_adjustment = 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + unsigned char test_pattern_buffer[ + (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - + DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; +#else unsigned char test_pattern_buffer[ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; +#endif unsigned int test_pattern_size = 0; enum dp_test_pattern test_pattern; - struct dc_link_training_settings link_settings; union lane_adjust dpcd_lane_adjust; unsigned int lane; struct link_training_settings link_training_settings; - int i = 0; dpcd_test_pattern.raw = 0; memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment)); - memset(&link_settings, 0, sizeof(link_settings)); + memset(&link_training_settings, 0, sizeof(link_training_settings)); /* get phy test pattern and pattern parameters from DP receiver */ core_link_read_dpcd( @@ -2899,6 +3735,35 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) case PHY_TEST_PATTERN_CP2520_3: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case PHY_TEST_PATTERN_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; + break; + case PHY_TEST_PATTERN_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2; + break; + case PHY_TEST_PATTERN_PRBS9: + test_pattern = DP_TEST_PATTERN_PRBS9; + break; + case PHY_TEST_PATTERN_PRBS11: + test_pattern = DP_TEST_PATTERN_PRBS11; + break; + case PHY_TEST_PATTERN_PRBS15: + test_pattern = DP_TEST_PATTERN_PRBS15; + break; + case PHY_TEST_PATTERN_PRBS23: + test_pattern = DP_TEST_PATTERN_PRBS23; + break; + case PHY_TEST_PATTERN_PRBS31: + test_pattern = DP_TEST_PATTERN_PRBS31; + break; + case PHY_TEST_PATTERN_264BIT_CUSTOM: + test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM; + break; + case PHY_TEST_PATTERN_SQUARE_PULSE: + test_pattern = DP_TEST_PATTERN_SQUARE_PULSE; + break; +#endif default: test_pattern = DP_TEST_PATTERN_VIDEO_MODE; break; @@ -2914,30 +3779,59 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_size); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) { + test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) + core_link_read_dpcd( + link, + DP_PHY_SQUARE_PATTERN, + test_pattern_buffer, + test_pattern_size); + } + + if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) { + test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256- + DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1; + core_link_read_dpcd( + link, + DP_TEST_264BIT_CUSTOM_PATTERN_7_0, + test_pattern_buffer, + test_pattern_size); + } +#endif + /* prepare link training settings */ - link_settings.link = link->cur_link_settings; + link_training_settings.link_settings = link->cur_link_settings; for (lane = 0; lane < (unsigned int)(link->cur_link_settings.lane_count); lane++) { dpcd_lane_adjust.raw = 
get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); - link_settings.lane_settings[lane].VOLTAGE_SWING = - (enum dc_voltage_swing) - (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); - link_settings.lane_settings[lane].PRE_EMPHASIS = - (enum dc_pre_emphasis) - (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); - link_settings.lane_settings[lane].POST_CURSOR2 = - (enum dc_post_cursor2) - ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); - } - - for (i = 0; i < 4; i++) - link_training_settings.lane_settings[i] = - link_settings.lane_settings[i]; - link_training_settings.link_settings = link_settings.link; - link_training_settings.allow_invalid_msa_timing_param = false; + if (dp_get_link_encoding_format(&link->cur_link_settings) == + DP_8b_10b_ENCODING) { + link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing) + (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); + link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis) + (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); + link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = + (enum dc_post_cursor2) + ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); + } +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if (dp_get_link_encoding_format(&link->cur_link_settings) == + DP_128b_132b_ENCODING) { + link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw = + dpcd_lane_adjust.tx_ffe.PRESET_VALUE; + } +#endif + } + + dp_hw_to_dpcd_lane_settings(&link_training_settings, + link_training_settings.hw_lane_settings, + link_training_settings.dpcd_lane_settings); /*Usage: Measure DP physical lane signal * by DP SI test equipment automatically. * PHY test pattern request is generated by equipment via HPD interrupt. @@ -3158,7 +4052,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video } } -static void handle_automated_test(struct dc_link *link) +void dc_link_dp_handle_automated_test(struct dc_link *link) { union test_request test_request; union test_response test_response; @@ -3207,17 +4101,50 @@ static void handle_automated_test(struct dc_link *link) sizeof(test_response)); } -bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss) +void dc_link_dp_handle_link_loss(struct dc_link *link) { - union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } }; - union device_service_irq device_service_clear = { { 0 } }; + int i; + struct pipe_ctx *pipe_ctx; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) + break; + } + + if (pipe_ctx == NULL || pipe_ctx->stream == NULL) + return; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { + core_link_disable_stream(pipe_ctx); + } + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { + core_link_enable_stream(link->dc->current_state, pipe_ctx); + } + } +} + +bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, + bool defer_handling, bool *has_left_work) +{ + union hpd_irq_data hpd_irq_dpcd_data = {0}; + union device_service_irq device_service_clear = 
{0}; enum dc_status result; bool status = false; - struct pipe_ctx *pipe_ctx; - int i; if (out_link_loss) *out_link_loss = false; + + if (has_left_work) + *has_left_work = false; /* For use cases related to down stream connection status change, * PSR and device auto test, refer to function handle_sst_hpd_irq * in DAL2.1*/ @@ -3249,11 +4176,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd &device_service_clear.raw, sizeof(device_service_clear.raw)); device_service_clear.raw = 0; - handle_automated_test(link); + if (defer_handling && has_left_work) + *has_left_work = true; + else + dc_link_dp_handle_automated_test(link); return false; } - if (!allow_hpd_rx_irq(link)) { + if (!dc_link_dp_allow_hpd_rx_irq(link)) { DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", __func__, link->link_index); return false; @@ -3267,12 +4197,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd * so do not handle as a normal sink status change interrupt. */ - if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; return true; + } /* check if we have MST msg and return since we poll for it */ - if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; return false; + } /* For now we only handle 'Downstream port status' case. * If we got sink count changed it means @@ -3289,29 +4225,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd sizeof(hpd_irq_dpcd_data), "Status: "); - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link) - break; - } - - if (pipe_ctx == NULL || pipe_ctx->stream == NULL) - return false; - - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) - core_link_disable_stream(pipe_ctx); - } - - for (i = 0; i < MAX_PIPES; i++) { - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) - core_link_enable_stream(link->dc->current_state, pipe_ctx); - } + if (defer_handling && has_left_work) + *has_left_work = true; + else + dc_link_dp_handle_link_loss(link); status = false; if (out_link_loss) @@ -3535,6 +4452,43 @@ static void get_active_converter_info( dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + union dp_dfp_cap_ext dfp_cap_ext; + memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); + core_link_read_dpcd( + link, + DP_DFP_CAPABILITY_EXTENSION_SUPPORT, + dfp_cap_ext.raw, + sizeof(dfp_cap_ext.raw)); + link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = + dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + + (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = + 
dfp_cap_ext.fields.max_video_h_active_width[0] + + (dfp_cap_ext.fields.max_video_h_active_width[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = + dfp_cap_ext.fields.max_video_v_active_height[0] + + (dfp_cap_ext.fields.max_video_v_active_height[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = + dfp_cap_ext.fields.encoding_format_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = + dfp_cap_ext.fields.rgb_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = + dfp_cap_ext.fields.ycbcr444_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = + dfp_cap_ext.fields.ycbcr422_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = + dfp_cap_ext.fields.ycbcr420_color_depth_caps; + DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); + DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false"); + DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); + DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); + DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); + } +#endif } static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, @@ -3594,7 +4548,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) bool dp_retrieve_lttpr_cap(struct dc_link *link) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t lttpr_dpcd_data[8]; + bool allow_lttpr_non_transparent_mode = 0; +#else uint8_t lttpr_dpcd_data[6]; +#endif bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable; bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; enum dc_status status = DC_ERROR_UNEXPECTED; @@ -3602,6 +4561,16 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 && + link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) { + allow_lttpr_non_transparent_mode = 1; + } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && + !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + allow_lttpr_non_transparent_mode = 1; + } +#endif + /* * Logic to determine LTTPR mode */ @@ -3609,17 +4578,31 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) if (vbios_lttpr_enable && vbios_lttpr_interop) link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else if (!vbios_lttpr_enable && vbios_lttpr_interop) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (allow_lttpr_non_transparent_mode) +#else if (link->dc->config.allow_lttpr_non_transparent_mode) +#endif link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else link->lttpr_mode = LTTPR_MODE_TRANSPARENT; } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support) +#else if (!link->dc->config.allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support) +#endif link->lttpr_mode = LTTPR_MODE_NON_LTTPR; else link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; } +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Check DP tunnel LTTPR mode debug option. 
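+ * When dpia_debug.bits.force_non_lttpr is set for a USB4 DPIA endpoint, the mode decided above is overridden to LTTPR_MODE_NON_LTTPR.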
*/ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->dc->debug.dpia_debug.bits.force_non_lttpr) + link->lttpr_mode = LTTPR_MODE_NON_LTTPR; +#endif if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { /* By reading LTTPR capability, RX assumes that we will enable @@ -3631,7 +4614,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data, sizeof(lttpr_dpcd_data)); if (status != DC_OK) { - dm_error("%s: Read LTTPR caps data failed.\n", __func__); + DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__); return false; } @@ -3659,8 +4642,19 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; +#if defined(CONFIG_DRM_AMD_DC_DCN) + link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = + lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = + lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; +#endif + /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff && link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); @@ -3709,6 +4703,8 @@ static bool retrieve_link_cap(struct dc_link *link) LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); is_lttpr_present = dp_retrieve_lttpr_cap(link); + /* Read DP tunneling information. */ + status = dpcd_get_tunneling_device_data(link); status = core_link_read_dpcd(link, DP_SET_POWER, &dpcd_power_state, sizeof(dpcd_power_state)); @@ -3909,16 +4905,82 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_SUPPORT, link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + status = core_link_read_dpcd( + link, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); + DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); + } +#else status = core_link_read_dpcd( link, DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); +#endif } if (!dpcd_read_sink_ext_caps(link)) link->dpcd_sink_ext_caps.raw = 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) + link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV]; + + if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); + + core_link_read_dpcd(link, + DP_128b_132b_SUPPORTED_LINK_RATES, + 
&link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, + sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); + if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) + link->reported_link_cap.link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) + link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) + link->reported_link_cap.link_rate = LINK_RATE_UHBR10; + else + dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); + DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); + DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", + link->reported_link_cap.link_rate / 100, + link->reported_link_cap.link_rate % 100); + + core_link_read_dpcd(link, + DP_SINK_VIDEO_FALLBACK_FORMATS, + &link->dpcd_caps.fallback_formats.raw, + sizeof(link->dpcd_caps.fallback_formats.raw)); + DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); + if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) + DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) + DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) + DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.raw == 0) { + DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); + link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; + } + + core_link_read_dpcd(link, + DP_FEC_CAPABILITY_1, + &link->dpcd_caps.fec_cap1.raw, + sizeof(link->dpcd_caps.fec_cap1.raw)); + DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); + if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) + DC_LOG_DP2("\tFEC aggregated error counters are supported"); + } +#endif + /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -4349,7 +5411,7 @@ bool dc_link_dp_set_test_pattern( * MuteAudioEndpoint(pPathMode->pDisplayPath, true); */ /* Blank stream */ - pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc); + pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); } dp_set_hw_test_pattern(link, test_pattern, @@ -4389,6 +5451,35 @@ bool dc_link_dp_set_test_pattern( case DP_TEST_PATTERN_CP2520_3: pattern = PHY_TEST_PATTERN_CP2520_3; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_TEST_PATTERN_128b_132b_TPS1: + pattern = PHY_TEST_PATTERN_128b_132b_TPS1; + break; + case DP_TEST_PATTERN_128b_132b_TPS2: + pattern = PHY_TEST_PATTERN_128b_132b_TPS2; + break; + case DP_TEST_PATTERN_PRBS9: + pattern = PHY_TEST_PATTERN_PRBS9; + break; + case DP_TEST_PATTERN_PRBS11: + pattern = PHY_TEST_PATTERN_PRBS11; + break; + case DP_TEST_PATTERN_PRBS15: + pattern = PHY_TEST_PATTERN_PRBS15; + break; + case DP_TEST_PATTERN_PRBS23: + pattern = PHY_TEST_PATTERN_PRBS23; + break; + case DP_TEST_PATTERN_PRBS31: + pattern = PHY_TEST_PATTERN_PRBS31; + break; + case DP_TEST_PATTERN_264BIT_CUSTOM: + pattern = PHY_TEST_PATTERN_264BIT_CUSTOM; + break; + case DP_TEST_PATTERN_SQUARE_PULSE: + pattern = PHY_TEST_PATTERN_SQUARE_PULSE; + break; +#endif default: return false; } @@ -4398,6 +5489,14 @@ bool dc_link_dp_set_test_pattern( return false; if (link->dpcd_caps.dpcd_rev.raw 
>= DPCD_REV_12) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) + core_link_write_dpcd(link, + DP_LINK_SQUARE_PATTERN, + p_custom_pattern, + 1); + +#endif /* tell receiver that we are sending qualification * pattern DP 1.2 or later - DP receiver's link quality * pattern is set using DPCD LINK_QUAL_LANEx_SET @@ -4651,7 +5750,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready) */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -4671,7 +5770,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready) link_enc->funcs->fec_set_ready(link_enc, true); link->fec_state = dc_link_fec_ready; } else { - link_enc->funcs->fec_set_ready(link->link_enc, false); + link_enc->funcs->fec_set_ready(link_enc, false); link->fec_state = dc_link_fec_not_ready; dm_error("dpcd write failed to set fec_ready"); } @@ -4698,8 +5797,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) */ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link( - link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -4919,7 +6017,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin uint8_t link_bw_set; uint8_t link_rate_set; uint32_t req_bw; - union lane_count_set lane_count_set = { {0} }; + union lane_count_set lane_count_set = {0}; ASSERT(link || crtc_timing); // invalid input @@ -4947,7 +6045,10 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); - decide_edp_link_settings(link, &link_setting, req_bw); + if (!crtc_timing->flags.DSC) + decide_edp_link_settings(link, &link_setting, req_bw); + else + decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { @@ -4964,6 +6065,227 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings if ((link_settings->link_rate >= LINK_RATE_LOW) && (link_settings->link_rate <= LINK_RATE_HIGH3)) return DP_8b_10b_ENCODING; +#if defined(CONFIG_DRM_AMD_DC_DCN) + else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && + (link_settings->link_rate <= LINK_RATE_UHBR20)) + return DP_128b_132b_ENCODING; +#endif return DP_UNKNOWN_ENCODING; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) +{ + struct dc_link_settings link_settings = {0}; + + if (!dc_is_dp_signal(link->connector_signal)) + return DP_UNKNOWN_ENCODING; + + if (link->preferred_link_setting.lane_count != + LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != + LINK_RATE_UNKNOWN) { + link_settings = link->preferred_link_setting; + } else { + decide_mst_link_settings(link, &link_settings); + } + + return dp_get_link_encoding_format(&link_settings); +} + +// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) +static void get_lane_status( + struct dc_link *link, + uint32_t lane_count, + union lane_status *status, + 
union lane_align_status_updated *status_updated) +{ + unsigned int lane; + uint8_t dpcd_buf[3] = {0}; + + if (status == NULL || status_updated == NULL) { + return; + } + + core_link_read_dpcd( + link, + DP_LANE0_1_STATUS, + dpcd_buf, + sizeof(dpcd_buf)); + + for (lane = 0; lane < lane_count; lane++) { + status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane); + } + + status_updated->raw = dpcd_buf[2]; +} + +bool dpcd_write_128b_132b_sst_payload_allocation_table( + const struct dc_stream_state *stream, + struct dc_link *link, + struct link_mst_stream_allocation_table *proposed_table, + bool allocate) +{ + const uint8_t vc_id = 1; /// VC ID always 1 for SST + const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST + bool result = false; + uint8_t req_slot_count = 0; + struct fixed31_32 avg_time_slots_per_mtp = { 0 }; + union payload_table_update_status update_status = { 0 }; + const uint32_t max_retries = 30; + uint32_t retries = 0; + + if (allocate) { + avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + } else { + /// Leave req_slot_count = 0 if allocate is false. + } + + /// Write DPCD 2C0 = 1 to start updating + update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; + core_link_write_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + /// Program the changes in DPCD 1C0 - 1C2 + ASSERT(vc_id == 1); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_SET, + &vc_id, + 1); + + ASSERT(start_time_slot == 0); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, + &start_time_slot, + 1); + + ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); /// Validation should filter out modes that exceed link BW + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, + &req_slot_count, + 1); + + /// Poll till DPCD 2C0 read 1 + /// Try for at least 150ms (30 retries, with 5ms delay after each attempt) + + while (retries < max_retries) { + if (core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1) == DC_OK) { + if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { + DC_LOG_DP2("SST Update Payload: downstream payload table updated."); + result = true; + break; + } + } else { + union dpcd_rev dpcdRev; + + if (core_link_read_dpcd( + link, + DP_DPCD_REV, + &dpcdRev.raw, + 1) != DC_OK) { + DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " + "of sink while polling payload table " + "updated status bit."); + break; + } + } + retries++; + udelay(5000); + } + + if (!result && retries == max_retries) { + DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " + "continue on. 
Something is wrong with the branch."); + // TODO - DP2.0 Payload: Read and log the payload table from downstream branch + } + + proposed_table->stream_count = 1; /// Always 1 stream for SST + proposed_table->stream_allocations[0].slot_count = req_slot_count; + proposed_table->stream_allocations[0].vcp_id = vc_id; + + return result; +} + +bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link) +{ + /* + * wait for ACT handled + */ + int i; + const int act_retries = 30; + enum act_return_status result = ACT_FAILED; + union payload_table_update_status update_status = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated lane_status_updated; + + for (i = 0; i < act_retries; i++) { + get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated); + + if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_interlane_aligned(lane_status_updated)) { + DC_LOG_ERROR("SST Update Payload: Link loss occurred while " + "polling for ACT handled."); + result = ACT_LINK_LOST; + break; + } + core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + if (update_status.bits.ACT_HANDLED == 1) { + DC_LOG_DP2("SST Update Payload: ACT handled by downstream."); + result = ACT_SUCCESS; + break; + } + + udelay(5000); + } + + if (result == ACT_FAILED) { + DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, " + "continue on. Something is wrong with the branch."); + } + + return (result == ACT_SUCCESS); +} + +struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link) +{ + struct fixed31_32 link_bw_effective = + dc_fixpt_from_int( + dc_link_bandwidth_kbps(link, &link->cur_link_settings)); + struct fixed31_32 timeslot_bw_effective = + dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); + struct fixed31_32 timing_bw = + dc_fixpt_from_int( + dc_bandwidth_in_kbps_from_timing(&stream->timing)); + struct fixed31_32 avg_time_slots_per_mtp = + dc_fixpt_div(timing_bw, timeslot_bw_effective); + + return avg_time_slots_per_mtp; +} + +bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->stream->link->hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c index 72970e49800a..7f25c11f4248 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c @@ -176,12 +176,15 @@ static void dpcd_reduce_address_range( uint8_t * const reduced_data, const uint32_t reduced_size) { - const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size); - const uint32_t extended_end_address = END_ADDRESS(extended_address, extended_size); const uint32_t offset = reduced_address - extended_address; - if (extended_end_address == reduced_end_address && extended_address == reduced_address) - return; /* extended and reduced address ranges point to the same data */ + /* + * If the address is same, address was not extended. + * So we do not need to free any memory. + * The data is in original buffer(reduced_data). 
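+ * (In other words, buffer identity replaces the old end-address comparison: when no extended allocation was made, extended_data and reduced_data alias and there is nothing to copy back or free.)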
+ */ + if (extended_data == reduced_data) + return; memcpy(&extended_data[offset], reduced_data, reduced_size); kfree(extended_data); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c new file mode 100644 index 000000000000..d72122593959 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c @@ -0,0 +1,962 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc.h" +#include "dc_link_dpia.h" +#include "inc/core_status.h" +#include "dc_link.h" +#include "dc_link_dp.h" +#include "dpcd_defs.h" +#include "link_hwss.h" +#include "dm_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "inc/link_dpcd.h" + +#define DC_LOGGER \ + link->ctx->logger + +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) +{ + enum dc_status status = DC_OK; + uint8_t dpcd_dp_tun_data[3] = {0}; + uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; + uint8_t i = 0; + + status = core_link_read_dpcd(link, + DP_TUNNELING_CAPABILITIES_SUPPORT, + dpcd_dp_tun_data, + sizeof(dpcd_dp_tun_data)); + + status = core_link_read_dpcd(link, + DP_USB4_ROUTER_TOPOLOGY_ID, + dpcd_topology_data, + sizeof(dpcd_topology_data)); + + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = + dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - + DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = + dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) + link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; + + return status; +} + +/* Configure link as prescribed in link_setting; set LTTPR mode; and + * Initialize link training settings. + * Abort link training if sink unplug detected. + * + * @param link DPIA link being trained. + * @param[in] link_setting Lane count, link rate and downspread control. + * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis). 
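+ * @return LINK_TRAINING_SUCCESS on success, or LINK_TRAINING_ABORT if a DPCD or SET_CONFIG step fails while a sink unplug (HPD) is pending.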
+ */ +static enum link_training_result dpia_configure_link(struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings) +{ + enum dc_status status; + bool fec_enable; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + link->lttpr_mode); + + dp_decide_training_settings(link, + link_setting, + lt_settings); + + status = dpcd_configure_channel_coding(link, lt_settings); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + /* Configure lttpr mode */ + status = dpcd_configure_lttpr_mode(link, lt_settings); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + /* Set link rate, lane count and spread. */ + status = dpcd_set_link_settings(link, lt_settings); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + if (link->preferred_training_settings.fec_enable) + fec_enable = *link->preferred_training_settings.fec_enable; + else + fec_enable = true; + status = dp_set_fec_ready(link, fec_enable); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + return LINK_TRAINING_SUCCESS; +} + +static enum dc_status core_link_send_set_config(struct dc_link *link, + uint8_t msg_type, + uint8_t msg_data) +{ + struct set_config_cmd_payload payload; + enum set_config_status set_config_result = SET_CONFIG_PENDING; + + /* prepare set_config payload */ + payload.msg_type = msg_type; + payload.msg_data = msg_data; + + if (!link->ddc->ddc_pin && !link->aux_access_disabled && + (dm_helpers_dmub_set_config_sync(link->ctx, link, + &payload, &set_config_result) == -1)) { + return DC_ERROR_UNEXPECTED; + } + + /* set_config should return ACK if successful */ + return (set_config_result == SET_CONFIG_ACK_RECEIVED) ? DC_OK : DC_ERROR_UNEXPECTED; +} + +/* Build SET_CONFIG message data payload for specified message type. */ +static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type, + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + union dpia_set_config_data data; + + data.raw = 0; + + switch (type) { + case DPIA_SET_CFG_SET_LINK: + data.set_link.mode = link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0; + break; + case DPIA_SET_CFG_SET_PHY_TEST_MODE: + break; + case DPIA_SET_CFG_SET_VSPE: + /* Assume all lanes have same drive settings. */ + data.set_vspe.swing = lt_settings->lane_settings[0].VOLTAGE_SWING; + data.set_vspe.pre_emph = lt_settings->lane_settings[0].PRE_EMPHASIS; + data.set_vspe.max_swing_reached = + lt_settings->lane_settings[0].VOLTAGE_SWING == + VOLTAGE_SWING_MAX_LEVEL ? 1 : 0; + data.set_vspe.max_pre_emph_reached = + lt_settings->lane_settings[0].PRE_EMPHASIS == + PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; + break; + default: + ASSERT(false); /* Message type not supported by helper function. */ + break; + } + + return data.raw; +} + +/* Convert DC training pattern to DPIA training stage. */ +static enum dpia_set_config_ts convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern tps) +{ + enum dpia_set_config_ts ts; + + switch (tps) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + ts = DPIA_TS_TPS1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + ts = DPIA_TS_TPS2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + ts = DPIA_TS_TPS3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + ts = DPIA_TS_TPS4; + break; + default: + ts = DPIA_TS_DPRX_DONE; + ASSERT(false); /* TPS not supported by helper function. 
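+ * (Unsupported patterns fall back to DPIA_TS_DPRX_DONE after the assert.)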
*/ + break; + } + + return ts; +} + +/* Write training pattern to DPCD. */ +static enum dc_status dpcd_set_lt_pattern(struct dc_link *link, + enum dc_dp_training_pattern pattern, + uint32_t hop) +{ + union dpcd_training_pattern dpcd_pattern = { {0} }; + uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; + enum dc_status status; + + if (hop != DPRX) + dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); + + /* DpcdAddress_TrainingPatternSet */ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); + + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dc_dp_initialize_scrambling_data_symbols(link, pattern); + + if (hop != DPRX) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + hop, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } + + status = core_link_write_dpcd(link, + dpcd_tps_offset, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); + + return status; +} + +/* Execute clock recovery phase of link training for specified hop in display + * path in non-transparent mode: + * - Driver issues both DPCD and SET_CONFIG transactions. + * - TPS1 is transmitted for any hops downstream of DPOA. + * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA. + * - CR for the first hop (DPTX-to-DPIA) is assumed to be successful. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop The Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + enum dc_status status; + uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ + uint32_t retry_count = 0; + /* From DP spec, CR read interval is always 100us. */ + uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; + uint8_t set_cfg_data; + enum dpia_set_config_ts ts; + + repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery. + * Fix inherited from perform_clock_recovery_sequence() - + * the DP equivalent of this function: + * Required for Synaptics MST hub which can put the LT in + * infinite loop by switching the VS between level 0 and level 1 + * continuously. + */ + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + /* DPTX-to-DPIA */ + if (hop == repeater_cnt) { + /* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that + * non-transparent link training has started. + * This also enables the transmission of clk_sync packets.
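+ * (Hop numbering follows the convention used throughout this file: hop == repeater_cnt is the DPTX-to-DPIA leg, counting down to hop == 0 for DPRX.)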
+ */ + set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK, + link, + lt_settings); + status = core_link_send_set_config(link, + DPIA_SET_CFG_SET_LINK, + set_cfg_data); + /* CR for this hop is considered successful as long as + * SET_CONFIG message is acknowledged by DPOA. + */ + if (status == DC_OK) + result = LINK_TRAINING_SUCCESS; + else + result = LINK_TRAINING_ABORT; + break; + } + + /* DPOA-to-x */ + /* Instruct DPOA to transmit TPS1 then update DPCD. */ + if (retry_count == 0) { + ts = convert_trng_ptn_to_trng_stg(lt_settings->pattern_for_cr); + status = core_link_send_set_config(link, + DPIA_SET_CFG_SET_TRAINING, + ts); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + + /* Update DPOA drive settings then DPCD. DPOA only adjusts + * drive settings for hops immediately downstream. + */ + if (hop == repeater_cnt - 1) { + set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE, + link, + lt_settings); + status = core_link_send_set_config(link, + DPIA_SET_CFG_SET_VSPE, + set_cfg_data); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + status = dpcd_set_lane_settings(link, lt_settings, hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); + + /* Read status and adjustment requests from DPCD. */ + status = dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* Check if clock recovery successful. */ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + result = LINK_TRAINING_SUCCESS; + break; + } + + result = dp_get_cr_failure(lane_count, dpcd_lane_status); + + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* Count number of attempts with same drive settings. + * Note: settings are the same for all lanes, + * so comparing first lane is sufficient. + */ + if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET == + dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE)) + retries_cr++; + else + retries_cr = 0; + + /* Update VS/PE. */ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->lane_settings, + lt_settings->dpcd_lane_settings); + retry_count++; + } + + /* Abort link training if clock recovery failed due to HPD unplug. */ + if (link->is_hpd_pending) + result = LINK_TRAINING_ABORT; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" + " -hop(%d)\n - result(%d)\n - retries(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + hop, + result, + retry_count); + + return result; +} + +/* Execute clock recovery phase of link training in transparent LTTPR mode: + * - Driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA. + * - Driver writes TPS1 to DPCD to kick off training. + * - Clock recovery (CR) for link is handled by DPOA, which reports result to DPIA on completion. + * - DPIA communicates result to driver by updating CR status when driver reads DPCD. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
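+ * @return LINK_TRAINING_SUCCESS once clock recovery completes on all lanes; a clock recovery failure code from dp_get_cr_failure() otherwise; LINK_TRAINING_ABORT on DPCD failure or HPD unplug.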
+ */ +static enum link_training_result dpia_training_cr_transparent(struct dc_link *link, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; + enum dc_status status; + uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ + uint32_t retry_count = 0; + uint32_t wait_time_microsec = lt_settings->cr_pattern_time; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; + + /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery. + * Fix inherited from perform_clock_recovery_sequence() - + * the DP equivalent of this function: + * Required for Synaptics MST hub which can put the LT in + * infinite loop by switching the VS between level 0 and level 1 + * continuously. + */ + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + /* Write TPS1 (not VS or PE) to DPCD to start CR phase. + * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to + * start link training. + */ + if (retry_count == 0) { + status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + + dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); + + /* Read status and adjustment requests from DPCD. */ + status = dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* Check if clock recovery successful. */ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + result = LINK_TRAINING_SUCCESS; + break; + } + + result = dp_get_cr_failure(lane_count, dpcd_lane_status); + + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* Count number of attempts with same drive settings. + * Note: settings are the same for all lanes, + * so comparing first lane is sufficient. + */ + if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET == + dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE)) + retries_cr++; + else + retries_cr = 0; + + /* Update VS/PE. */ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + /* Abort link training if clock recovery failed due to HPD unplug. */ + if (link->is_hpd_pending) + result = LINK_TRAINING_ABORT; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" + " -hop(%d)\n - result(%d)\n - retries(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + DPRX, + result, + retry_count); + + return result; +} + +/* Execute clock recovery phase of link training for specified hop in display + * path. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop The Hop in display path. DPRX = 0. 
+ */ +static enum link_training_result dpia_training_cr_phase(struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; + + if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + result = dpia_training_cr_non_transparent(link, lt_settings, hop); + else + result = dpia_training_cr_transparent(link, lt_settings); + + return result; +} + +/* Return status read interval during equalization phase. */ +static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t hop) +{ + uint32_t wait_time_microsec; + + if (hop == DPRX) + wait_time_microsec = lt_settings->eq_pattern_time; + else + wait_time_microsec = + dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Check debug option for extending aux read interval. */ + if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval) + wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US; +#endif + + return wait_time_microsec; +} + +/* Execute equalization phase of link training for specified hop in display + * path in non-transparent mode: + * - driver issues both DPCD and SET_CONFIG transactions. + * - TPSx is transmitted for any hops downstream of DPOA. + * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA. + * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful. + * - DPRX EQ only reported successful when both DPRX and DPIA requirements + * (clk sync packets sent) fulfilled. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop The Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + uint32_t retries_eq = 0; + enum dc_status status; + enum dc_dp_training_pattern tr_pattern; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; + uint8_t set_cfg_data; + enum dpia_set_config_ts ts; + + /* Training pattern is TPS4 for repeater; + * TPS2/3/4 for DPRX depending on what it supports. + */ + if (hop == DPRX) + tr_pattern = lt_settings->pattern_for_eq; + else + tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + + repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + /* DPTX-to-DPIA equalization always successful. */ + if (hop == repeater_cnt) { + result = LINK_TRAINING_SUCCESS; + break; + } + + /* Instruct DPOA to transmit TPSn then update DPCD. 
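 * (Here TPSn is DP_TRAINING_PATTERN_SEQUENCE_4 for LTTPR hops and the EQ
 * pattern chosen from DPRX capabilities when hop == DPRX, i.e.
 * lt_settings->pattern_for_eq, as selected above.)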
 */
+ if (retries_eq == 0) {
+ ts = convert_trng_ptn_to_trng_stg(tr_pattern);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ ts);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ status = dpcd_set_lt_pattern(link, tr_pattern, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ /* Update DPOA drive settings then DPCD. DPOA only adjusts
+ * drive settings for hop immediately downstream.
+ */
+ if (hop == repeater_cnt - 1) {
+ set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+ status = dpcd_set_lane_settings(link, lt_settings, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* Extend wait time on second equalization attempt on final hop to
+ * ensure clock sync packets have been sent.
+ */
+ if (hop == DPRX && retries_eq == 1)
+ wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY);
+ else
+ wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop);
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* CR can still fail during EQ phase. Fail training if CR fails. */
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+
+ /* Abort link training if equalization failed due to HPD unplug. */
+ if (link->is_hpd_pending)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
+ " - hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ retries_eq);
+
+ return result;
+}
+
+/* Execute equalization phase of link training for specified hop in display
+ * path in transparent LTTPR mode:
+ * - driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA.
+ * - driver writes TPSx to DPCD to notify DPIA that it is in equalization phase.
+ * - equalization (EQ) for link is handled by DPOA, which reports result to DPIA on completion.
+ * - DPIA communicates result to driver by updating EQ status when driver reads DPCD.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */ +static enum link_training_result dpia_training_eq_transparent(struct dc_link *link, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; + uint32_t retries_eq = 0; + enum dc_status status; + enum dc_dp_training_pattern tr_pattern = lt_settings->pattern_for_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = { {0} }; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } }; + + wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX); + + for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + if (retries_eq == 0) { + status = dpcd_set_lt_pattern(link, tr_pattern, DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + + dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); + + /* Read status and adjustment requests from DPCD. */ + status = dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* CR can still fail during EQ phase. Fail training if CR fails. */ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + result = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { + result = LINK_TRAINING_SUCCESS; + break; + } + + /* Update VS/PE. */ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + + /* Abort link training if equalization failed due to HPD unplug. */ + if (link->is_hpd_pending) + result = LINK_TRAINING_ABORT; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" + " - hop(%d)\n - result(%d)\n - retries(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + DPRX, + result, + retries_eq); + + return result; +} + +/* Execute equalization phase of link training for specified hop in display + * path. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop The Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_eq_phase(struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result; + + if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + result = dpia_training_eq_non_transparent(link, lt_settings, hop); + else + result = dpia_training_eq_transparent(link, lt_settings); + + return result; +} + +/* End training of specified hop in display path. */ +static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop) +{ + union dpcd_training_pattern dpcd_pattern = { {0} }; + uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; + enum dc_status status; + + if (hop != DPRX) + dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); + + status = core_link_write_dpcd(link, + dpcd_tps_offset, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); + + return status; +} + +/* End training of specified hop in display path. 
+ * + * In transparent LTTPR mode: + * - driver clears training pattern for the specified hop in DPCD. + * In non-transparent LTTPR mode: + * - in addition to clearing training pattern, driver issues USB4 tunneling + * (SET_CONFIG) messages to notify DPOA when training is done for first hop + * (DPTX-to-DPIA) and last hop (DPRX). + * + * @param link DPIA link being trained. + * @param hop The Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_end(struct dc_link *link, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + enum dc_status status; + + if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + if (hop == repeater_cnt) { /* DPTX-to-DPIA */ + /* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that + * DPTX-to-DPIA hop trained. No DPCD write needed for first hop. + */ + status = core_link_send_set_config(link, + DPIA_SET_CFG_SET_TRAINING, + DPIA_TS_UFP_DONE); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + } else { /* DPOA-to-x */ + /* Write 0x0 to TRAINING_PATTERN_SET */ + status = dpcd_clear_lt_pattern(link, hop); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + } + + /* Notify DPOA that non-transparent link training of DPRX done. */ + if (hop == DPRX && result != LINK_TRAINING_ABORT) { + status = core_link_send_set_config(link, + DPIA_SET_CFG_SET_TRAINING, + DPIA_TS_DPRX_DONE); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + } + + } else { /* non-LTTPR or transparent LTTPR. */ + /* Write 0x0 to TRAINING_PATTERN_SET */ + status = dpcd_clear_lt_pattern(link, hop); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + } + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + hop, + result, + link->lttpr_mode); + + return result; +} + +/* When aborting training of specified hop in display path, clean up by: + * - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET. + * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0. + * + * @param link DPIA link being trained. + * @param hop The Hop in display path. DPRX = 0. + */ +static void dpia_training_abort(struct dc_link *link, uint32_t hop) +{ + uint8_t data = 0; + uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + link->lttpr_mode, + link->is_hpd_pending); + + /* Abandon clean-up if sink unplugged. */ + if (link->is_hpd_pending) + return; + + if (hop != DPRX) + dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); + + core_link_write_dpcd(link, dpcd_tps_offset, &data, 1); + core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1); + core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1); + core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data); +} + +enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link, + const struct dc_link_settings *link_setting, + bool skip_video_pattern) +{ + enum link_training_result result; + struct link_training_settings lt_settings; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + int8_t repeater_id; /* Current hop. 
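 * (an illustrative skeleton of the hop loop follows; the driver listing
 * resumes after it)
 */

The function below walks the hops from the one nearest DPTX down to DPRX
(hop 0), running clock recovery, then equalization, then ending training on
each hop. A minimal skeleton of that control flow, with stand-in types and
callbacks rather than the driver's own:

enum lt_result { LT_SUCCESS, LT_FAIL, LT_ABORT };

/* Train hops repeater_cnt..0; any failure stops the walk. */
static enum lt_result train_all_hops(int repeater_cnt,
				     enum lt_result (*cr)(int hop),
				     enum lt_result (*eq)(int hop),
				     enum lt_result (*end)(int hop))
{
	enum lt_result res = LT_SUCCESS;
	int hop;

	for (hop = repeater_cnt; hop >= 0; hop--) {
		res = cr(hop);
		if (res != LT_SUCCESS)
			break;
		res = eq(hop);
		if (res != LT_SUCCESS)
			break;
		res = end(hop);
		if (res != LT_SUCCESS)
			break;
	}
	return res;
}

In transparent or non-LTTPR mode repeater_cnt stays 0, so only DPRX is
trained, matching the loop in dc_link_dpia_perform_link_training().

/* (driver listing resumes)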
 */
+
+ /* Configure link as prescribed in link_setting and set LTTPR mode. */
+ result = dpia_configure_link(link, link_setting, &lt_settings);
+ if (result != LINK_TRAINING_SUCCESS)
+ return result;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Train each hop in turn starting with the one closest to DPTX.
+ * In transparent or non-LTTPR mode, train only the final hop (DPRX).
+ */
+ for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
+ /* Clock recovery. */
+ result = dpia_training_cr_phase(link, &lt_settings, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+
+ /* Equalization. */
+ result = dpia_training_eq_phase(link, &lt_settings, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+
+ /* Stop training hop. */
+ result = dpia_training_end(link, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+ }
+
+ /* Double-check link status if training successful; gracefully abort
+ * training of current hop if training failed due to message tunneling
+ * failure; end training of hop if training ended conventionally and
+ * falling back to lower bandwidth settings possible.
+ */
+ if (result == LINK_TRAINING_SUCCESS) {
+ msleep(5);
+ result = dp_check_link_loss_status(link, &lt_settings);
+ } else if (result == LINK_TRAINING_ABORT) {
+ dpia_training_abort(link, repeater_id);
+ } else {
+ dpia_training_end(link, repeater_id);
+ }
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index de80a9ea4cfa..a55944da8d53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -1,5 +1,4 @@
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
+/* Copyright 2021 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -35,78 +34,132 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
 int i;
 /* Loop over created link encoder objects. */
- for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
- link_enc = stream->ctx->dc->res_pool->link_encoders[i];
-
- if (link_enc &&
- ((uint32_t)stream->signal & link_enc->output_signals)) {
- if (dc_is_dp_signal(stream->signal)) {
- /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
- struct dc_link_settings link_settings = {0};
-
- decide_link_settings(stream, &link_settings);
- if ((link_settings.link_rate >= LINK_RATE_LOW) &&
- link_settings.link_rate <= LINK_RATE_HIGH3) {
+ if (stream) {
+ for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ link_enc = stream->ctx->dc->res_pool->link_encoders[i];
+
+ /* Need to check link signal type rather than stream signal type which may not
+ * yet match.
+ */
+ if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) {
+ if (dc_is_dp_signal(stream->signal)) {
+ /* DIGs do not support DP2.0 streams with 128b/132b encoding.
*/ + struct dc_link_settings link_settings = {0}; + + decide_link_settings(stream, &link_settings); + if ((link_settings.link_rate >= LINK_RATE_LOW) && + link_settings.link_rate <= LINK_RATE_HIGH3) { + is_dig_stream = true; + break; + } + } else { is_dig_stream = true; break; } - } else { - is_dig_stream = true; - break; } } } - return is_dig_stream; } -/* Update DIG link encoder resource tracking variables in dc_state. */ -static void update_link_enc_assignment( +static struct link_enc_assignment get_assignment(struct dc *dc, int i) +{ + struct link_enc_assignment assignment; + + if (dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_TRANSIENT) + assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i]; + else /* LINK_ENC_CFG_STEADY */ + assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + return assignment; +} + +/* Return stream using DIG link encoder resource. NULL if unused. */ +static struct dc_stream_state *get_stream_using_link_enc( + struct dc_state *state, + enum engine_id eng_id) +{ + struct dc_stream_state *stream = NULL; + int i; + + for (i = 0; i < state->stream_count; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if ((assignment.valid == true) && (assignment.eng_id == eng_id)) { + stream = state->streams[i]; + break; + } + } + + return stream; +} + +static void remove_link_enc_assignment( struct dc_state *state, struct dc_stream_state *stream, - enum engine_id eng_id, - bool add_enc) + enum engine_id eng_id) { int eng_idx; - int stream_idx; int i; if (eng_id != ENGINE_ID_UNKNOWN) { eng_idx = eng_id - ENGINE_ID_DIGA; - stream_idx = -1; - /* Index of stream in dc_state used to update correct entry in + /* stream ptr of stream in dc_state used to update correct entry in * link_enc_assignments table. */ - for (i = 0; i < state->stream_count; i++) { - if (stream == state->streams[i]) { - stream_idx = i; + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid && assignment.stream == stream) { + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; + /* Only add link encoder back to availability pool if not being + * used by any other stream (i.e. removing SST stream or last MST stream). + */ + if (get_stream_using_link_enc(state, eng_id) == NULL) + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id; + + stream->link_enc = NULL; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; break; } } + } +} + +static void add_link_enc_assignment( + struct dc_state *state, + struct dc_stream_state *stream, + enum engine_id eng_id) +{ + int eng_idx; + int i; + + if (eng_id != ENGINE_ID_UNKNOWN) { + eng_idx = eng_id - ENGINE_ID_DIGA; - /* Update link encoder assignments table, link encoder availability - * pool and link encoder assigned to stream in state. - * Add/remove encoder resource to/from stream. + /* stream ptr of stream in dc_state used to update correct entry in + * link_enc_assignments table. 
 */
- if (stream_idx != -1) {
- if (add_enc) {
- state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i]) {
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i] = (struct link_enc_assignment){
 .valid = true,
 .ep_id = (struct display_endpoint_id) {
 .link_id = stream->link->link_id,
 .ep_type = stream->link->ep_type},
- .eng_id = eng_id};
- state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
+ .eng_id = eng_id,
+ .stream = stream};
+ dc_stream_retain(stream);
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
 stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
- } else {
- state->res_ctx.link_enc_assignments[stream_idx].valid = false;
- state->res_ctx.link_enc_avail[eng_idx] = eng_id;
- stream->link_enc = NULL;
+ break;
 }
- } else {
- dm_output_to_console("%s: Stream not found in dc_state.\n", __func__);
 }
+
+ /* Attempted to add an encoder assignment for a stream not in dc_state. */
+ ASSERT(i != state->stream_count);
 }
}
@@ -119,7 +172,7 @@ static enum engine_id find_first_avail_link_enc(
 int i;
 for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
- eng_id = state->res_ctx.link_enc_avail[i];
+ eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
 if (eng_id != ENGINE_ID_UNKNOWN)
 break;
 }
@@ -127,46 +180,97 @@
 return eng_id;
}
-/* Return stream using DIG link encoder resource. NULL if unused. */
-static struct dc_stream_state *get_stream_using_link_enc(
+/* Check for availability of link encoder eng_id. */
+static bool is_avail_link_enc(struct dc_state *state, enum engine_id eng_id, struct dc_stream_state *stream)
+{
+ bool is_avail = false;
+ int eng_idx = eng_id - ENGINE_ID_DIGA;
+
+ /* An encoder is available if it is still in the availability pool. */
+ if (eng_id != ENGINE_ID_UNKNOWN && state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] != ENGINE_ID_UNKNOWN) {
+ is_avail = true;
+ } else {
+ struct dc_stream_state *stream_assigned = NULL;
+
+ /* MST streams share the same link and should share the same encoder.
+ * If a stream that has already been assigned a link encoder uses the
+ * same link as the stream being checked for availability, it is an MST
+ * stream and should use the same link encoder.
+ */
+ stream_assigned = get_stream_using_link_enc(state, eng_id);
+ if (stream_assigned && stream != stream_assigned && stream->link == stream_assigned->link)
+ is_avail = true;
+ }
+
+ return is_avail;
+}
+
+/* Test for display_endpoint_id equality.
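 * (a small sketch of this test follows; the driver listing resumes after it)
 */

Two display endpoints count as the same when every link_id field and the
endpoint type match; the MST sharing rule above leans on this to route all
streams on one link through one DIG encoder. A minimal sketch with stand-in
field types (the driver's display_endpoint_id uses its own enums):

#include <stdbool.h>

struct ep_id { unsigned int id, enum_id, type, ep_type; };

/* Field-by-field comparison, mirroring are_ep_ids_equal() below. */
static bool ep_ids_equal_sketch(const struct ep_id *lhs, const struct ep_id *rhs)
{
	return lhs->id == rhs->id &&
	       lhs->enum_id == rhs->enum_id &&
	       lhs->type == rhs->type &&
	       lhs->ep_type == rhs->ep_type;
}

/* (are_ep_ids_equal() follows in the driver listing)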
*/ +static bool are_ep_ids_equal(struct display_endpoint_id *lhs, struct display_endpoint_id *rhs) +{ + bool are_equal = false; + + if (lhs->link_id.id == rhs->link_id.id && + lhs->link_id.enum_id == rhs->link_id.enum_id && + lhs->link_id.type == rhs->link_id.type && + lhs->ep_type == rhs->ep_type) + are_equal = true; + + return are_equal; +} + +static struct link_encoder *get_link_enc_used_by_link( struct dc_state *state, - enum engine_id eng_id) + const struct dc_link *link) { - struct dc_stream_state *stream = NULL; - int stream_idx = -1; + struct link_encoder *link_enc = NULL; + struct display_endpoint_id ep_id; int i; - for (i = 0; i < state->stream_count; i++) { - struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; + ep_id = (struct display_endpoint_id) { + .link_id = link->link_id, + .ep_type = link->ep_type}; - if (assignment.valid && (assignment.eng_id == eng_id)) { - stream_idx = i; - break; - } - } + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; - if (stream_idx != -1) - stream = state->streams[stream_idx]; - else - dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id); + if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) + link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA]; + } - return stream; + return link_enc; } - -void link_enc_cfg_init( - struct dc *dc, - struct dc_state *state) +/* Clear all link encoder assignments. */ +static void clear_enc_assignments(const struct dc *dc, struct dc_state *state) { int i; + for (i = 0; i < MAX_PIPES; i++) { + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; + if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) { + dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream); + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; + } + } + for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { if (dc->res_pool->link_encoders[i]) - state->res_ctx.link_enc_avail[i] = (enum engine_id) i; + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = (enum engine_id) i; else - state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; + state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } } +void link_enc_cfg_init( + const struct dc *dc, + struct dc_state *state) +{ + clear_enc_assignments(dc, state); + + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; +} + void link_enc_cfg_link_encs_assign( struct dc *dc, struct dc_state *state, @@ -175,10 +279,16 @@ void link_enc_cfg_link_encs_assign( { enum engine_id eng_id = ENGINE_ID_UNKNOWN; int i; + int j; + + ASSERT(state->stream_count == stream_count); /* Release DIG link encoder resources before running assignment algorithm. */ - for (i = 0; i < stream_count; i++) - dc->res_pool->funcs->link_enc_unassign(state, streams[i]); + for (i = 0; i < dc->current_state->stream_count; i++) + dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]); + + for (i = 0; i < MAX_PIPES; i++) + ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false); /* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */ for (i = 0; i < stream_count; i++) { @@ -191,26 +301,82 @@ void link_enc_cfg_link_encs_assign( /* Physical endpoints have a fixed mapping to DIG link encoders. 
*/ if (!stream->link->is_dig_mapping_flexible) { eng_id = stream->link->eng_id; - update_link_enc_assignment(state, stream, eng_id, true); + add_link_enc_assignment(state, stream, eng_id); + } + } + + /* (b) Retain previous assignments for mappable endpoints if encoders still available. */ + eng_id = ENGINE_ID_UNKNOWN; + + if (state != dc->current_state) { + struct dc_state *prev_state = dc->current_state; + + for (i = 0; i < stream_count; i++) { + struct dc_stream_state *stream = state->streams[i]; + + /* Skip stream if not supported by DIG link encoder. */ + if (!is_dig_link_enc_stream(stream)) + continue; + + if (!stream->link->is_dig_mapping_flexible) + continue; + + for (j = 0; j < prev_state->stream_count; j++) { + struct dc_stream_state *prev_stream = prev_state->streams[j]; + + if (stream == prev_stream && stream->link == prev_stream->link && + prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) { + eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id; + if (is_avail_link_enc(state, eng_id, stream)) + add_link_enc_assignment(state, stream, eng_id); + } + } } } - /* (b) Then assign encoders to mappable endpoints. */ + /* (c) Then assign encoders to remaining mappable endpoints. */ eng_id = ENGINE_ID_UNKNOWN; for (i = 0; i < stream_count; i++) { struct dc_stream_state *stream = streams[i]; /* Skip stream if not supported by DIG link encoder. */ - if (!is_dig_link_enc_stream(stream)) + if (!is_dig_link_enc_stream(stream)) { + ASSERT(stream->link->is_dig_mapping_flexible != true); continue; + } /* Mappable endpoints have a flexible mapping to DIG link encoders. */ if (stream->link->is_dig_mapping_flexible) { - eng_id = find_first_avail_link_enc(stream->ctx, state); - update_link_enc_assignment(state, stream, eng_id, true); + struct link_encoder *link_enc = NULL; + + /* Skip if encoder assignment retained in step (b) above. */ + if (stream->link_enc) + continue; + + /* For MST, multiple streams will share the same link / display + * endpoint. These streams should use the same link encoder + * assigned to that endpoint. + */ + link_enc = get_link_enc_used_by_link(state, stream->link); + if (link_enc == NULL) + eng_id = find_first_avail_link_enc(stream->ctx, state); + else + eng_id = link_enc->preferred_engine; + add_link_enc_assignment(state, stream, eng_id); } } + + link_enc_cfg_validate(dc, state); + + /* Update transient assignments. */ + for (i = 0; i < MAX_PIPES; i++) { + dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i] = + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + } + + /* Current state mode will be set to steady once this state committed. 
*/ + state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } void link_enc_cfg_link_enc_unassign( @@ -226,16 +392,16 @@ void link_enc_cfg_link_enc_unassign( if (stream->link_enc) eng_id = stream->link_enc->preferred_engine; - update_link_enc_assignment(state, stream, eng_id, false); + remove_link_enc_assignment(state, stream, eng_id); } bool link_enc_cfg_is_transmitter_mappable( - struct dc_state *state, + struct dc *dc, struct link_encoder *link_enc) { bool is_mappable = false; enum engine_id eng_id = link_enc->preferred_engine; - struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id); + struct dc_stream_state *stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id); if (stream) is_mappable = stream->link->is_dig_mapping_flexible; @@ -243,73 +409,228 @@ bool link_enc_cfg_is_transmitter_mappable( return is_mappable; } -struct dc_link *link_enc_cfg_get_link_using_link_enc( - struct dc_state *state, +struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc( + struct dc *dc, enum engine_id eng_id) { - struct dc_link *link = NULL; - int stream_idx = -1; + struct dc_stream_state *stream = NULL; int i; - for (i = 0; i < state->stream_count; i++) { - struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); - if (assignment.valid && (assignment.eng_id == eng_id)) { - stream_idx = i; + if ((assignment.valid == true) && (assignment.eng_id == eng_id)) { + stream = assignment.stream; break; } } - if (stream_idx != -1) - link = state->streams[stream_idx]->link; - else - dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id); + return stream; +} + +struct dc_link *link_enc_cfg_get_link_using_link_enc( + struct dc *dc, + enum engine_id eng_id) +{ + struct dc_link *link = NULL; + struct dc_stream_state *stream = NULL; + stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id); + + if (stream) + link = stream->link; + + // dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id); return link; } struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( - struct dc_state *state, + struct dc *dc, const struct dc_link *link) { struct link_encoder *link_enc = NULL; struct display_endpoint_id ep_id; - int stream_idx = -1; int i; ep_id = (struct display_endpoint_id) { .link_id = link->link_id, .ep_type = link->ep_type}; - for (i = 0; i < state->stream_count; i++) { - struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i]; - - if (assignment.valid && - assignment.ep_id.link_id.id == ep_id.link_id.id && - assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id && - assignment.ep_id.link_id.type == ep_id.link_id.type && - assignment.ep_id.ep_type == ep_id.ep_type) { - stream_idx = i; + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); + + if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) { + link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA]; break; } } - if (stream_idx != -1) - link_enc = state->streams[stream_idx]->link_enc; - return link_enc; } -struct link_encoder *link_enc_cfg_get_next_avail_link_enc( - const struct dc *dc, - const struct dc_state *state) +struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) { struct link_encoder *link_enc = NULL; - enum engine_id eng_id; + enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS]; + int i; + + for (i = 0; i < 
MAX_DIG_LINK_ENCODERS; i++) + encs_assigned[i] = ENGINE_ID_UNKNOWN; + + /* Add assigned encoders to list. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); + + if (assignment.valid) + encs_assigned[assignment.eng_id - ENGINE_ID_DIGA] = assignment.eng_id; + } + + for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { + if (encs_assigned[i] == ENGINE_ID_UNKNOWN) { + link_enc = dc->res_pool->link_encoders[i]; + break; + } + } - eng_id = find_first_avail_link_enc(dc->ctx, state); - if (eng_id != ENGINE_ID_UNKNOWN) - link_enc = dc->res_pool->link_encoders[eng_id - ENGINE_ID_DIGA]; + return link_enc; +} + +struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( + struct dc *dc, + const struct dc_stream_state *stream) +{ + struct link_encoder *link_enc; + + link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link); return link_enc; } + +bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link) +{ + bool is_avail = true; + int i; + + /* An encoder is not available if it has already been assigned to a different endpoint. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = get_assignment(dc, i); + struct display_endpoint_id ep_id = (struct display_endpoint_id) { + .link_id = link->link_id, + .ep_type = link->ep_type}; + + if (assignment.valid && assignment.eng_id == eng_id && !are_ep_ids_equal(&ep_id, &assignment.ep_id)) { + is_avail = false; + break; + } + } + + return is_avail; +} + +bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state) +{ + bool is_valid = false; + bool valid_entries = true; + bool valid_stream_ptrs = true; + bool valid_uniqueness = true; + bool valid_avail = true; + bool valid_streams = true; + int i, j; + uint8_t valid_count = 0; + uint8_t dig_stream_count = 0; + int matching_stream_ptrs = 0; + int eng_ids_per_ep_id[MAX_PIPES] = {0}; + int valid_bitmap = 0; + + /* (1) No. valid entries same as stream count. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid) + valid_count++; + + if (is_dig_link_enc_stream(state->streams[i])) + dig_stream_count++; + } + if (valid_count != dig_stream_count) + valid_entries = false; + + /* (2) Matching stream ptrs. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid) { + if (assignment.stream == state->streams[i]) + matching_stream_ptrs++; + else + valid_stream_ptrs = false; + } + } + + /* (3) Each endpoint assigned unique encoder. */ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment_i = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment_i.valid) { + struct display_endpoint_id ep_id_i = assignment_i.ep_id; + + eng_ids_per_ep_id[i]++; + for (j = 0; j < MAX_PIPES; j++) { + struct link_enc_assignment assignment_j = + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j]; + + if (j == i) + continue; + + if (assignment_j.valid) { + struct display_endpoint_id ep_id_j = assignment_j.ep_id; + + if (are_ep_ids_equal(&ep_id_i, &ep_id_j) && + assignment_i.eng_id != assignment_j.eng_id) { + valid_uniqueness = false; + eng_ids_per_ep_id[i]++; + } + } + } + } + } + + /* (4) Assigned encoders not in available pool. 
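 * (a condensed sketch of check (3) follows; the driver listing resumes
 * after it)
 */

Check (3) above scans all pairs of valid assignments and flags the table as
invalid when one endpoint maps to two different engines. A condensed,
stand-alone sketch of that rule; the types are stand-ins, not the driver's:

#include <stdbool.h>

struct ep { unsigned int id, enum_id, type, ep_type; };

struct assignment {
	bool valid;
	int eng_id;
	struct ep ep;
};

static bool same_ep(const struct ep *a, const struct ep *b)
{
	return a->id == b->id && a->enum_id == b->enum_id &&
	       a->type == b->type && a->ep_type == b->ep_type;
}

/* O(n^2) pairwise scan: one endpoint, one engine. */
static bool endpoints_unique(const struct assignment *tbl, int n)
{
	int i, j;

	for (i = 0; i < n; i++)
		for (j = i + 1; j < n; j++)
			if (tbl[i].valid && tbl[j].valid &&
			    same_ep(&tbl[i].ep, &tbl[j].ep) &&
			    tbl[i].eng_id != tbl[j].eng_id)
				return false;
	return true;
}

/* (driver listing resumes with validation check (4))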
*/ + for (i = 0; i < MAX_PIPES; i++) { + struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; + + if (assignment.valid) { + for (j = 0; j < dc->res_pool->res_cap->num_dig_link_enc; j++) { + if (state->res_ctx.link_enc_cfg_ctx.link_enc_avail[j] == assignment.eng_id) { + valid_avail = false; + break; + } + } + } + } + + /* (5) All streams have valid link encoders. */ + for (i = 0; i < state->stream_count; i++) { + struct dc_stream_state *stream = state->streams[i]; + + if (is_dig_link_enc_stream(stream) && stream->link_enc == NULL) { + valid_streams = false; + break; + } + } + + is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams; + ASSERT(is_valid); + + if (is_valid == false) { + valid_bitmap = + (valid_entries & 0x1) | + ((valid_stream_ptrs & 0x1) << 1) | + ((valid_uniqueness & 0x1) << 2) | + ((valid_avail & 0x1) << 3) | + ((valid_streams & 0x1) << 4); + dm_error("Invalid link encoder assignments: 0x%x\n", valid_bitmap); + } + + return is_valid; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 9c51cd09dcf1..368e834c6809 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -17,6 +17,7 @@ #include "link_enc_cfg.h" #include "clk_mgr.h" #include "inc/link_dpcd.h" +#include "dccg.h" static uint8_t convert_to_count(uint8_t lttpr_repeater_count) { @@ -61,6 +62,13 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on) sizeof(state)); } +void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode) +{ + if (link != NULL && link->dc->debug.enable_driver_sequence_debug) + core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, + &dp_test_mode, sizeof(dp_test_mode)); +} + void dp_enable_link_phy( struct dc_link *link, enum signal_type signal, @@ -79,7 +87,7 @@ void dp_enable_link_phy( /* Link should always be assigned encoder when en-/disabling. 
*/ if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -111,12 +119,37 @@ void dp_enable_link_phy( link->cur_link_settings = *link_settings; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + /* TODO - DP2.0 HW: notify link rate change here */ + } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (dc->clk_mgr->funcs->notify_link_rate_change) + dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); + } +#else if (dc->clk_mgr->funcs->notify_link_rate_change) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); - +#endif if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + enable_dp_hpo_output(link, link_settings); + } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { + if (dc_is_dp_sst_signal(signal)) { + link_enc->funcs->enable_dp_output( + link_enc, + link_settings, + clock_source); + } else { + link_enc->funcs->enable_dp_mst_output( + link_enc, + link_settings, + clock_source); + } + } +#else if (dc_is_dp_sst_signal(signal)) { link_enc->funcs->enable_dp_output( link_enc, @@ -128,10 +161,11 @@ void dp_enable_link_phy( link_settings, clock_source); } - +#endif if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); dp_receiver_power_ctrl(link, true); } @@ -206,11 +240,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) { struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc; +#endif struct link_encoder *link_enc; /* Link should always be assigned encoder when en-/disabling. 
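 * (As in dp_enable_link_phy() above, the teardown below dispatches on
 * dp_get_link_encoding_format() on DCN builds: the 128b/132b HPO encoder
 * path versus the 8b/10b DIG encoder path.)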
*/ if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); else link_enc = link->link_enc; ASSERT(link_enc); @@ -221,18 +258,34 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) if (signal == SIGNAL_TYPE_EDP) { if (link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, false); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) + disable_dp_hpo_output(link, signal); + else + link_enc->funcs->disable_output(link_enc, signal); +#else link_enc->funcs->disable_output(link_enc, signal); +#endif link->dc->hwss.edp_power_control(link, false); } else { if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && + hpo_link_enc) + disable_dp_hpo_output(link, signal); + else + link_enc->funcs->disable_output(link_enc, signal); +#else link_enc->funcs->disable_output(link_enc, signal); - +#endif if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); } + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); + /* Clear current link setting.*/ memset(&link->cur_link_settings, 0, sizeof(link->cur_link_settings)); @@ -273,6 +326,14 @@ bool dp_set_hw_training_pattern( case DP_TRAINING_PATTERN_SEQUENCE_4: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; +#if defined(CONFIG_DRM_AMD_DC_DCN) + case DP_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; + break; + case DP_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; + break; +#endif default: break; } @@ -282,6 +343,10 @@ bool dp_set_hw_training_pattern( return true; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define DC_LOGGER \ + link->ctx->logger +#endif void dp_set_hw_lane_settings( struct dc_link *link, const struct link_training_settings *link_settings, @@ -293,7 +358,23 @@ void dp_set_hw_lane_settings( return; /* call Encoder to set lane settings */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link_settings->link_settings) == + DP_128b_132b_ENCODING) { + link->hpo_dp_link_enc->funcs->set_ffe( + link->hpo_dp_link_enc, + &link_settings->link_settings, + link_settings->lane_settings[0].FFE_PRESET.raw); + } else if (dp_get_link_encoding_format(&link_settings->link_settings) + == DP_8b_10b_ENCODING) { + encoder->funcs->dp_set_lane_settings(encoder, link_settings); + } +#else encoder->funcs->dp_set_lane_settings(encoder, link_settings); +#endif + memmove(link->cur_lane_setting, + link_settings->lane_settings, + sizeof(link->cur_lane_setting)); } void dp_set_hw_test_pattern( @@ -304,13 +385,16 @@ void dp_set_hw_test_pattern( { struct encoder_set_dp_phy_pattern_param pattern_param = {0}; struct link_encoder *encoder; +#if defined(CONFIG_DRM_AMD_DC_DCN) + enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings); +#endif /* Access link encoder based on whether it is statically * or dynamically assigned to a link. 
*/ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign) - encoder = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); else encoder = link->link_enc; @@ -319,8 +403,28 @@ void dp_set_hw_test_pattern( pattern_param.custom_pattern_size = custom_pattern_size; pattern_param.dp_panel_mode = dp_get_panel_mode(link); +#if defined(CONFIG_DRM_AMD_DC_DCN) + switch (link_encoding_format) { + case DP_128b_132b_ENCODING: + link->hpo_dp_link_enc->funcs->set_link_test_pattern( + link->hpo_dp_link_enc, &pattern_param); + break; + case DP_8b_10b_ENCODING: + ASSERT(encoder); + encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param); + break; + default: + DC_LOG_ERROR("%s: Unknown link encoding format.", __func__); + break; + } +#else encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param); +#endif + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); } +#if defined(CONFIG_DRM_AMD_DC_DCN) +#undef DC_LOGGER +#endif void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, @@ -338,7 +442,7 @@ void dp_retrain_link_dp_test(struct dc_link *link, pipes[i].stream->link == link) { udelay(100); - pipes[i].stream_res.stream_enc->funcs->dp_blank( + pipes[i].stream_res.stream_enc->funcs->dp_blank(link, pipes[i].stream_res.stream_enc); /* disable any test pattern that might be active */ @@ -351,9 +455,10 @@ void dp_retrain_link_dp_test(struct dc_link *link, if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only) (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio); - link->link_enc->funcs->disable_output( - link->link_enc, - SIGNAL_TYPE_DISPLAY_PORT); + if (link->link_enc) + link->link_enc->funcs->disable_output( + link->link_enc, + SIGNAL_TYPE_DISPLAY_PORT); /* Clear current link setting. */ memset(&link->cur_link_settings, 0, @@ -468,7 +573,12 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? 
OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
 /* Enable DSC in encoder */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
+ && !is_dp_128b_132b_signal(pipe_ctx)) {
+#else
 if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+#endif
 DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
 dsc_optc_config_log(dsc, &dsc_optc_cfg);
 pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -495,13 +605,22 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 /* disable DSC in stream encoder */
 if (dc_is_dp_signal(stream->signal)) {
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
- pipe_ctx->stream_res.stream_enc,
- OPTC_DSC_DISABLED, 0, 0);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL);
- }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+#endif
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+ pipe_ctx->stream_res.stream_enc,
+ OPTC_DSC_DISABLED, 0, 0);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
+ }
 }
 /* disable DSC block */
@@ -535,7 +654,16 @@ out:
 return result;
}
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
+/*
+ * For the dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled;
+ * hence PPS info packet update needs to use frame update instead of immediate update.
+ * Added parameter immediate_update for this purpose.
+ * The decision to use frame update is hard-coded in function dp_update_dsc_config(),
+ * which is the only place where a "false" would be passed in for param immediate_update.
+ *
+ * immediate_update is only applicable when DSC is enabled.
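 * (a short usage note follows; dp_set_dsc_pps_sdp() itself comes right
 * after it)
 */

A brief usage sketch, with pipe as a stand-in pipe_ctx pointer: per the note
above, dp_update_dsc_config() is the one caller that passes false, so the
PPS packet latches on a frame boundary under MASTER_UPDATE_LOCK; the other
callers visible in this patch pass true.

	dp_set_dsc_pps_sdp(pipe, true, true);  /* enable PPS SDP, apply immediately */
	dp_set_dsc_pps_sdp(pipe, true, false); /* dynamic bpp change: frame update */

/* (driver listing resumes)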
+ */
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
 {
 struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
 struct dc_stream_state *stream = pipe_ctx->stream;
@@ -562,16 +690,35 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
 if (dc_is_dp_signal(stream->signal)) {
 DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc,
- true,
- &dsc_packed_pps[0]);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
+ else
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
 }
 } else {
 /* disable DSC PPS in stream encoder */
 if (dc_is_dp_signal(stream->signal)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
 }
 }
@@ -589,7 +736,171 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
 return false;
 dp_set_dsc_on_stream(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true);
+ dp_set_dsc_pps_sdp(pipe_ctx, true, false);
 return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#undef DC_LOGGER
+#define DC_LOGGER \
+ link->ctx->logger
+
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+ switch (link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYD32CLKA;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYD32CLKB;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYD32CLKC;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYD32CLKD;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYD32CLKE;
+ default:
+ return PHYD32CLKA;
+ }
+}
+
+void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings)
+{
+ const struct dc *dc = link->dc;
+ enum phyd32clk_clock_source phyd32clk;
+
+ /* Enable PHY PLL at target bit rate
+ * UHBR10 = 10Gbps (SYMCLK32 = 312.5MHz)
+ * UHBR13.5 = 13.5Gbps (SYMCLK32 = 421.875MHz)
+ * UHBR20 = 20Gbps (SYMCLK32 = 625MHz)
+ */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ dm_set_phyd32clk(dc->ctx, 312500);
+ break;
+ case LINK_RATE_UHBR13_5:
+ dm_set_phyd32clk(dc->ctx, 421875);
+ break;
+ case LINK_RATE_UHBR20:
+ dm_set_phyd32clk(dc->ctx, 625000);
+ break;
+ default:
+ return;
+ }
+ } else {
+ /* DP2.0 HW: call transmitter control to enable PHY */
+ link->hpo_dp_link_enc->funcs->enable_link_phy(
+ link->hpo_dp_link_enc,
+ link_settings,
+ link->link_enc->transmitter);
+ }
+
+ /* DCCG muxing and DTBCLK DTO */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->res_pool->dccg->funcs->set_physymclk(
+ dc->res_pool->dccg,
+ link->link_enc_hw_inst,
+ PHYSYMCLK_FORCE_SRC_PHYD32CLK,
+ true);
+
+ phyd32clk = get_phyd32clk_src(link);
+
dc->res_pool->dccg->funcs->enable_symclk32_le( + dc->res_pool->dccg, + link->hpo_dp_link_enc->inst, + phyd32clk); + link->hpo_dp_link_enc->funcs->link_enable( + link->hpo_dp_link_enc, + link_settings->lane_count); + } +} + +void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal) +{ + const struct dc *dc = link->dc; + + link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc); + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + dc->res_pool->dccg->funcs->disable_symclk32_le( + dc->res_pool->dccg, + link->hpo_dp_link_enc->inst); + + dc->res_pool->dccg->funcs->set_physymclk( + dc->res_pool->dccg, + link->link_enc_hw_inst, + PHYSYMCLK_FORCE_SRC_SYMCLK, + false); + + dm_set_phyd32clk(dc->ctx, 0); + } else { + /* DP2.0 HW: call transmitter control to disable PHY */ + link->hpo_dp_link_enc->funcs->disable_link_phy( + link->hpo_dp_link_enc, + signal); + } +} + +void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct pipe_ctx *odm_pipe; + int odm_combine_num_segments = 1; + enum phyd32clk_clock_source phyd32clk; + + if (enable) { + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_combine_num_segments++; + + dc->res_pool->dccg->funcs->set_dpstreamclk( + dc->res_pool->dccg, + DTBCLK0, + pipe_ctx->stream_res.tg->inst); + + phyd32clk = get_phyd32clk_src(stream->link); + dc->res_pool->dccg->funcs->enable_symclk32_se( + dc->res_pool->dccg, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst, + phyd32clk); + + dc->res_pool->dccg->funcs->set_dtbclk_dto( + dc->res_pool->dccg, + pipe_ctx->stream_res.tg->inst, + stream->phy_pix_clk, + odm_combine_num_segments, + &stream->timing); + } else { + dc->res_pool->dccg->funcs->set_dtbclk_dto( + dc->res_pool->dccg, + pipe_ctx->stream_res.tg->inst, + 0, + 0, + &stream->timing); + dc->res_pool->dccg->funcs->disable_symclk32_se( + dc->res_pool->dccg, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst); + dc->res_pool->dccg->funcs->set_dpstreamclk( + dc->res_pool->dccg, + REFCLK, + pipe_ctx->stream_res.tg->inst); + } +} + +void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link) +{ + const struct dc *dc = link->dc; + struct dc_state *state = dc->current_state; + uint8_t i; + + for (i = 0; i < MAX_PIPES; i++) { + if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc && + state->res_ctx.pipe_ctx[i].stream && + state->res_ctx.pipe_ctx[i].stream->link == link && + !state->res_ctx.pipe_ctx[i].stream->dpms_off) { + setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false); + } + } +} + +#undef DC_LOGGER +#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index a60396d5be44..ce8f7f4fa2b7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -41,6 +41,8 @@ #include "set_mode_types.h" #include "virtual/virtual_stream_encoder.h" #include "dpcd_defs.h" +#include "link_enc_cfg.h" +#include "dc_link_dp.h" #if defined(CONFIG_DRM_AMD_DC_SI) #include "dce60/dce60_resource.h" @@ -54,6 +56,7 @@ #include "dcn10/dcn10_resource.h" #include "dcn20/dcn20_resource.h" #include "dcn21/dcn21_resource.h" +#include "dcn201/dcn201_resource.h" #include "dcn30/dcn30_resource.h" #include "dcn301/dcn301_resource.h" #include "dcn302/dcn302_resource.h" @@ -128,6 +131,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) case FAMILY_NV: dc_version = 
DCN_VERSION_2_0; + if (asic_id.chip_id == DEVICE_ID_NV_13FE) { + dc_version = DCN_VERSION_2_01; + break; + } if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_0; if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) @@ -217,6 +224,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, case DCN_VERSION_2_1: res_pool = dcn21_create_resource_pool(init_data, dc); break; + case DCN_VERSION_2_01: + res_pool = dcn201_create_resource_pool(init_data, dc); + break; case DCN_VERSION_3_0: res_pool = dcn30_create_resource_pool(init_data, dc); break; @@ -347,6 +357,29 @@ bool resource_construct( } #if defined(CONFIG_DRM_AMD_DC_DCN) + pool->hpo_dp_stream_enc_count = 0; + if (create_funcs->create_hpo_dp_stream_encoder) { + for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) { + pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx); + if (pool->hpo_dp_stream_enc[i] == NULL) + DC_ERR("DC: failed to create HPO DP stream encoder!\n"); + pool->hpo_dp_stream_enc_count++; + + } + } + + pool->hpo_dp_link_enc_count = 0; + if (create_funcs->create_hpo_dp_link_encoder) { + for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) { + pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx); + if (pool->hpo_dp_link_enc[i] == NULL) + DC_ERR("DC: failed to create HPO DP link encoder!\n"); + pool->hpo_dp_link_enc_count++; + } + } +#endif + +#if defined(CONFIG_DRM_AMD_DC_DCN) for (i = 0; i < caps->num_mpc_3dlut; i++) { pool->mpc_lut[i] = dc_create_3dlut_func(); if (pool->mpc_lut[i] == NULL) @@ -1122,9 +1155,17 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width; } - if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE || - pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) - res = false; + if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) { + if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE || + pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) + res = false; + } else { + /* Clamp minimum viewport size */ + if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE) + pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE; + if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) + pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE; + } DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n" "src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n", @@ -1623,6 +1664,10 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; + // Only Have Audio left to check whether it is same or not. 
This is a corner case for Tiled sinks + if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + return false; + return true; } @@ -1665,6 +1710,22 @@ static void update_stream_engine_usage( } } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static void update_hpo_dp_stream_engine_usage( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct hpo_dp_stream_encoder *hpo_dp_stream_enc, + bool acquired) +{ + int i; + + for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { + if (pool->hpo_dp_stream_enc[i] == hpo_dp_stream_enc) + res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired; + } +} +#endif + /* TODO: release audio object */ void update_audio_usage( struct resource_context *res_ctx, @@ -1709,6 +1770,26 @@ static int acquire_first_free_pipe( return -1; } +#if defined(CONFIG_DRM_AMD_DC_DCN) +static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream) +{ + int i; + + for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { + if (!res_ctx->is_hpo_dp_stream_enc_acquired[i] && + pool->hpo_dp_stream_enc[i]) { + + return pool->hpo_dp_stream_enc[i]; + } + } + + return NULL; +} +#endif + static struct audio *find_first_free_audio( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1799,6 +1880,15 @@ enum dc_status dc_remove_stream_from_ctx( if (dc->res_pool->funcs->link_enc_unassign) dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(del_pipe)) { + update_hpo_dp_stream_engine_usage( + &new_ctx->res_ctx, dc->res_pool, + del_pipe->stream_res.hpo_dp_stream_enc, + false); + } +#endif + if (del_pipe->stream_res.audio) update_audio_usage( &new_ctx->res_ctx, @@ -1992,7 +2082,6 @@ static void mark_seamless_boot_stream( { struct dc_bios *dcb = dc->ctx->dc_bios; - /* TODO: Check Linux */ if (dc->config.allow_seamless_boot_optimization && !dcb->funcs->is_accelerated_mode(dcb)) { if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing)) @@ -2051,6 +2140,31 @@ enum dc_status resource_map_pool_resources( pipe_ctx->stream_res.stream_enc, true); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Allocate DP HPO Stream Encoder based on signal, hw capabilities + * and link settings + */ + if (dc_is_dp_signal(stream->signal) && + dc->caps.dp_hpo) { + struct dc_link_settings link_settings = {0}; + + decide_link_settings(stream, &link_settings); + if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) { + pipe_ctx->stream_res.hpo_dp_stream_enc = + find_first_free_match_hpo_dp_stream_enc_for_link( + &context->res_ctx, pool, stream); + + if (!pipe_ctx->stream_res.hpo_dp_stream_enc) + return DC_NO_STREAM_ENC_RESOURCE; + + update_hpo_dp_stream_engine_usage( + &context->res_ctx, pool, + pipe_ctx->stream_res.hpo_dp_stream_enc, + true); + } + } +#endif + /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && @@ -2113,6 +2227,9 @@ void dc_resource_state_construct( struct dc_state *dst_ctx) { dst_ctx->clk_mgr = dc->clk_mgr; + + /* Initialise DIG link encoder resource tracking variables. */ + link_enc_cfg_init(dc, dst_ctx); } @@ -2141,16 +2258,6 @@ enum dc_status dc_validate_global_state( if (!new_ctx) return DC_ERROR_UNEXPECTED; -#if defined(CONFIG_DRM_AMD_DC_DCN) - - /* - * Update link encoder to stream assignment. 
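/*
 * The HPO stream-encoder helpers above follow DC's usual pool convention: a
 * fixed array of encoder pointers in the resource pool, a parallel "acquired"
 * flag array in the per-state resource context, a linear first-free scan to
 * pick one, and the same usage helper called with acquired=false to release.
 * A minimal self-contained sketch of that convention (names are illustrative,
 * and unlike the driver it merges the find and mark steps, which DC keeps as
 * separate helpers so validation can still fail cleanly in between):
 */
#include <stdbool.h>
#include <stddef.h>

#define POOL_SIZE 4

struct enc;				/* opaque encoder handle */

struct pool {
	struct enc *enc[POOL_SIZE];
	int count;
};

struct res_ctx {
	bool acquired[POOL_SIZE];	/* per-state, so a trial state can be discarded */
};

static struct enc *acquire_first_free(struct res_ctx *ctx, const struct pool *p)
{
	for (int i = 0; i < p->count; i++) {
		if (p->enc[i] && !ctx->acquired[i]) {
			ctx->acquired[i] = true;
			return p->enc[i];
		}
	}
	return NULL;	/* the caller maps this to DC_NO_STREAM_ENC_RESOURCE */
}

static void release_enc(struct res_ctx *ctx, const struct pool *p, const struct enc *e)
{
	for (int i = 0; i < p->count; i++)
		if (p->enc[i] == e)
			ctx->acquired[i] = false;
}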
- * TODO: Split out reason allocation from validation. - */ - if (dc->res_pool->funcs->link_encs_assign) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); -#endif if (dc->res_pool->funcs->validate_global) { result = dc->res_pool->funcs->validate_global(dc, new_ctx); @@ -2202,6 +2309,16 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* + * Only update link encoder to stream assignment after bandwidth validation passed. + * TODO: Split out assignment and validation. + */ + if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) + dc->res_pool->funcs->link_encs_assign( + dc, new_ctx, new_ctx->streams, new_ctx->stream_count); +#endif + return result; } @@ -2395,17 +2512,7 @@ static void set_avi_info_frame( /* TODO : We should handle YCC quantization */ /* but we do not have matrix calculation */ - if (stream->qy_bit == 1) { - if (color_space == COLOR_SPACE_SRGB || - color_space == COLOR_SPACE_2020_RGB_FULLRANGE) - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - else if (color_space == COLOR_SPACE_SRGB_LIMITED || - color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - else - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - } else - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; ///VIC format = stream->timing.timing_3d_format; @@ -2726,9 +2833,24 @@ bool pipe_need_reprogram( if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc) return true; - /* DIG link encoder resource assignment for stream changed. */ - if (pipe_ctx_old->stream->link_enc != pipe_ctx->stream->link_enc) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc) return true; +#endif + + /* DIG link encoder resource assignment for stream changed. 
*/ + if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) { + bool need_reprogram = false; + struct dc *dc = pipe_ctx_old->stream->ctx->dc; + enum link_enc_cfg_mode mode = dc->current_state->res_ctx.link_enc_cfg_ctx.mode; + + dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; + if (link_enc_cfg_get_link_enc_used_by_stream(dc, pipe_ctx_old->stream) != pipe_ctx->stream->link_enc) + need_reprogram = true; + dc->current_state->res_ctx.link_enc_cfg_ctx.mode = mode; + + return need_reprogram; + } return false; } @@ -2871,7 +2993,8 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) res = DC_FAIL_CONTROLLER_VALIDATE; if (res == DC_OK) { - if (!link->link_enc->funcs->validate_output_with_stream( + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + !link->link_enc->funcs->validate_output_with_stream( link->link_enc, stream)) res = DC_FAIL_ENC_VALIDATE; } @@ -2890,6 +3013,11 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla { enum dc_status res = DC_OK; + /* check if surface has invalid dimensions */ + if (plane_state->src_rect.width == 0 || plane_state->src_rect.height == 0 || + plane_state->dst_rect.width == 0 || plane_state->dst_rect.height == 0) + return DC_FAIL_SURFACE_VALIDATE; + /* TODO For now validates pixel format only */ if (dc->res_pool->funcs->validate_plane) return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps); @@ -2975,3 +3103,22 @@ void get_audio_check(struct audio_info *aud_modes, } } +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( + const struct resource_pool *pool) +{ + uint8_t i; + struct hpo_dp_link_encoder *enc = NULL; + + ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS); + + for (i = 0; i < pool->hpo_dp_link_enc_count; i++) { + if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) { + enc = pool->hpo_dp_link_enc[i]; + break; + } + } + + return enc; +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index a249a0e5edd0..4b5e4d8e7735 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,14 +33,6 @@ * Private functions ******************************************************************************/ -static void dc_sink_destruct(struct dc_sink *sink) -{ - if (sink->dc_container_id) { - kfree(sink->dc_container_id); - sink->dc_container_id = NULL; - } -} - static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) { @@ -75,7 +67,7 @@ void dc_sink_retain(struct dc_sink *sink) static void dc_sink_free(struct kref *kref) { struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); - dc_sink_destruct(sink); + kfree(sink->dc_container_id); kfree(sink); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index 28ef9760fa34..4b372aa52801 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -61,6 +61,14 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification status = dmub_srv_stat_get_notification(dmub, notify); ASSERT(status == DMUB_STATUS_OK); + + /* For HPD/HPD RX, convert dpia port index into link index */ + if (notify->type == DMUB_NOTIFICATION_HPD || + notify->type == DMUB_NOTIFICATION_HPD_IRQ || + notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { + 
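/*
 * The dc_sink hunk above is a pure simplification: kfree(NULL) is defined to
 * be a no-op, exactly like free(NULL) in standard C, so a destructor that
 * tested the pointer and zeroed it immediately before the enclosing object
 * was freed added nothing. A userspace model of the same refcounted release
 * path (kref_put() invokes the release callback when the count reaches zero;
 * all names below are illustrative, not the kernel API):
 */
#include <stdlib.h>
#include <stddef.h>

struct kref_model { int refcount; };

struct sink {
	struct kref_model refcount;
	char *container_id;		/* may legitimately stay NULL */
};

#define container_of_model(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void sink_free(struct kref_model *kref)
{
	struct sink *s = container_of_model(kref, struct sink, refcount);

	free(s->container_id);		/* free(NULL) is a defined no-op */
	free(s);
}

static void kref_put_model(struct kref_model *kref,
			   void (*release)(struct kref_model *))
{
	if (--kref->refcount == 0)
		release(kref);
}

int main(void)
{
	struct sink *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;
	s->refcount.refcount = 1;
	kref_put_model(&s->refcount, sink_free);	/* container_id == NULL: fine */
	return 0;
}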
notify->link_index = + get_link_index_from_dpia_port_index(dc, notify->link_index); + } } /** diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index f0f54f4d3d9b..57cf4cb82370 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -202,6 +202,10 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) new_stream->stream_id = new_stream->ctx->dc_stream_id_count; new_stream->ctx->dc_stream_id_count++; + /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */ + if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign) + new_stream->link_enc = NULL; + kref_init(&new_stream->refcount); return new_stream; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3ab52d9a82cf..bf2878235dba 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -44,8 +44,10 @@ /* forward declaration */ struct aux_payload; +struct set_config_cmd_payload; +struct dmub_notification; -#define DC_VER "3.2.149" +#define DC_VER "3.2.163" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -183,6 +185,10 @@ struct dc_caps { unsigned int cursor_cache_size; struct dc_plane_cap planes[MAX_PLANES]; struct dc_color_caps color; +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool dp_hpo; +#endif + bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; }; @@ -206,12 +212,12 @@ struct dc_dcc_setting { unsigned int max_uncompressed_blk_size; bool independent_64b_blks; #if defined(CONFIG_DRM_AMD_DC_DCN) - //These bitfields to be used starting with DCN 3.0 + //These bitfields to be used starting with DCN struct { - uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case) - uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0 - uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0 - uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case) + uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) + uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN + uint32_t dcc_256_128_128 : 1; //available starting with DCN + uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) } dcc_controls; #endif }; @@ -289,7 +295,15 @@ struct dc_cap_funcs { struct link_training_settings; - +#if defined(CONFIG_DRM_AMD_DC_DCN) +union allow_lttpr_non_transparent_mode { + struct { + bool DP1_4A : 1; + bool DP2_0 : 1; + } bits; + unsigned char raw; +}; +#endif /* Structure to hold configuration flags set by dm at dc creation. 
*/ struct dc_config { bool gpu_vm_support; @@ -302,10 +316,15 @@ struct dc_config { bool edp_no_power_sequencing; bool force_enum_edp; bool forced_clocks; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode; +#else bool allow_lttpr_non_transparent_mode; +#endif bool multi_mon_pp_mclk_switch; bool disable_dmcu; bool enable_4to1MPC; + bool enable_windowed_mpo_odm; bool allow_edp_hotplug_detection; #if defined(CONFIG_DRM_AMD_DC_DCN) bool clamp_min_dcfclk; @@ -325,6 +344,12 @@ enum visual_confirm { VISUAL_CONFIRM_SWIZZLE = 9, }; +enum dc_psr_power_opts { + psr_power_opt_invalid = 0x0, + psr_power_opt_smu_opt_static_screen = 0x1, + psr_power_opt_z10_static_screen = 0x10, +}; + enum dcc_option { DCC_ENABLE = 0, DCC_DISABLE = 1, @@ -456,10 +481,40 @@ union mem_low_power_enable_options { bool cm: 1; bool mpc: 1; bool optc: 1; + bool vpg: 1; + bool afmt: 1; + } bits; + uint32_t u32All; +}; + +union root_clock_optimization_options { + struct { + bool dpp: 1; + bool dsc: 1; + bool hdmistream: 1; + bool hdmichar: 1; + bool dpstream: 1; + bool symclk32_se: 1; + bool symclk32_le: 1; + bool symclk_fe: 1; + bool physymclk: 1; + bool dpiasymclk: 1; + uint32_t reserved: 22; } bits; uint32_t u32All; }; +union dpia_debug_options { + struct { + uint32_t disable_dpia:1; + uint32_t force_non_lttpr:1; + uint32_t extend_aux_rd_interval:1; + uint32_t disable_mst_dsc_work_around:1; + uint32_t reserved:28; + } bits; + uint32_t raw; +}; + struct dc_debug_data { uint32_t ltFailCount; uint32_t i2cErrorCount; @@ -520,6 +575,8 @@ struct dc_debug_options { bool native422_support; bool disable_dsc; enum visual_confirm visual_confirm; + int visual_confirm_rect_height; + bool sanity_checks; bool max_disp_clk; bool surface_trace; @@ -548,6 +605,7 @@ struct dc_debug_options { enum wm_report_mode pplib_wm_report_mode; unsigned int min_disp_clk_khz; unsigned int min_dpp_clk_khz; + unsigned int min_dram_clk_khz; int sr_exit_time_dpm0_ns; int sr_enter_plus_exit_time_dpm0_ns; int sr_exit_time_ns; @@ -613,20 +671,29 @@ struct dc_debug_options { bool validate_dml_output; bool enable_dmcub_surface_flip; bool usbc_combo_phy_reset_wa; + bool disable_dsc_edp; + unsigned int force_dsc_edp_policy; bool enable_dram_clock_change_one_display_vactive; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - remove once tested */ + bool legacy_dp2_lt; + bool set_mst_en_for_sst; +#endif union mem_low_power_enable_options enable_mem_low_power; + union root_clock_optimization_options root_clock_optimization; + bool hpo_optimization; bool force_vblank_alignment; /* Enable dmub aux for legacy ddc */ bool enable_dmub_aux_for_legacy_ddc; bool optimize_edp_link_rate; /* eDP ILR */ - /* force enable edp FEC */ - bool force_enable_edp_fec; /* FEC/PSR1 sequence enable delay in 100us */ uint8_t fec_enable_delay_in100us; + bool enable_driver_sequence_debug; #if defined(CONFIG_DRM_AMD_DC_DCN) bool disable_z10; bool enable_sw_cntl_psr; + union dpia_debug_options dpia_debug; #endif }; @@ -672,6 +739,9 @@ struct dc { #if defined(CONFIG_DRM_AMD_DC_DCN) bool idle_optimizations_allowed; #endif +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool enable_c20_dtm_b0; +#endif /* Require to maintain clocks and bandwidth for UEFI enabled HW */ @@ -878,6 +948,7 @@ union surface_update_flags { uint32_t bandwidth_change:1; uint32_t clock_change:1; uint32_t stereo_format_change:1; + uint32_t lut_3d:1; uint32_t full_update:1; } bits; @@ -1145,7 +1216,14 @@ struct dpcd_caps { struct dpcd_dsc_capabilities dsc_caps; struct dc_lttpr_caps 
lttpr_caps; struct psr_caps psr_caps; + struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; + union dp_main_line_channel_coding_cap channel_coding_cap; + union dp_sink_video_fallback_formats fallback_formats; + union dp_fec_capability1 fec_cap1; +#endif }; union dpcd_sink_ext_caps { @@ -1287,6 +1365,8 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src); enum dc_irq_source dc_get_hpd_irq_source_at_index( struct dc *dc, uint32_t link_index); +void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable); + /******************************************************************************* * Power Interfaces ******************************************************************************/ @@ -1337,7 +1417,7 @@ void dc_hardware_release(struct dc *dc); bool dc_set_psr_allow_active(struct dc *dc, bool enable); #if defined(CONFIG_DRM_AMD_DC_DCN) -void dc_z10_restore(struct dc *dc); +void dc_z10_restore(const struct dc *dc); void dc_z10_save_init(struct dc *dc); #endif @@ -1347,6 +1427,20 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc, uint32_t link_index, struct aux_payload *payload); +/* Get dc link index from dpia port index */ +uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, + uint8_t dpia_port_index); + +bool dc_process_dmub_set_config_async(struct dc *dc, + uint32_t link_index, + struct set_config_cmd_payload *payload, + struct dmub_notification *notify); + +enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, + uint32_t link_index, + uint8_t mst_alloc_slots, + uint8_t *mst_slots_in_use); + /******************************************************************************* * DSC Interfaces ******************************************************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 360f3199ea6f..541376fabbef 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -115,13 +115,44 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) } } +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ + struct dmub_srv *dmub = dmub_srv->dmub; + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_clear_inbox0_ack(dmub); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } +} + +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ + struct dmub_srv *dmub = dmub_srv->dmub; + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_wait_for_inbox0_ack(dmub, 100000); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n"); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } +} + void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data) { struct dmub_srv *dmub = dmub_srv->dmub; - if (dmub->hw_funcs.send_inbox0_cmd) - dmub->hw_funcs.send_inbox0_cmd(dmub, data); - // TODO: Add wait command -- poll register for ACK + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_send_inbox0_cmd(dmub, data); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error sending INBOX0 cmd\n"); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + 
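/*
 * These three wrappers turn the old fire-and-forget INBOX0 write into an
 * acknowledged handshake; the hw-lock-manager change later in this patch is
 * the first caller to use all of them. The expected sequence, sketched as a
 * hypothetical convenience wrapper (not something the driver defines):
 */
static void dmub_send_inbox0_synced(struct dc_dmub_srv *dmub_srv,
				    union dmub_inbox0_data_register data)
{
	dc_dmub_srv_clear_inbox0_ack(dmub_srv);		/* drop any stale ack bit */
	dc_dmub_srv_send_inbox0_cmd(dmub_srv, data);
	dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);	/* bounded poll; the 100000
							 * above is presumably us */
}
/*
 * Clearing before sending matters: a leftover ack from a previous command
 * could otherwise satisfy the wait immediately and race the firmware.
 */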
} } bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 3e35eee7188c..7e4e2ec5915d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -68,6 +68,8 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu void dc_dmub_trace_event_control(struct dc *dc, bool enable); +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data); bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 4f54bde1bb1c..e68e9a86a4d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -53,7 +53,17 @@ enum dc_link_rate { LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane +#if defined(CONFIG_DRM_AMD_DC_DCN) + LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane + /* Starting from DP2.0 link rate enum directly represents actual + * link rate value in unit of 10 mbps + */ + LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane + LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane + LINK_RATE_UHBR20 = 2000, // UHBR10 - 20.0 Gbps/Lane +#else LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane +#endif }; enum dc_link_spread { @@ -90,17 +100,47 @@ enum dc_post_cursor2 { POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3, }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dc_dp_ffe_preset_level { + DP_FFE_PRESET_LEVEL0 = 0, + DP_FFE_PRESET_LEVEL1, + DP_FFE_PRESET_LEVEL2, + DP_FFE_PRESET_LEVEL3, + DP_FFE_PRESET_LEVEL4, + DP_FFE_PRESET_LEVEL5, + DP_FFE_PRESET_LEVEL6, + DP_FFE_PRESET_LEVEL7, + DP_FFE_PRESET_LEVEL8, + DP_FFE_PRESET_LEVEL9, + DP_FFE_PRESET_LEVEL10, + DP_FFE_PRESET_LEVEL11, + DP_FFE_PRESET_LEVEL12, + DP_FFE_PRESET_LEVEL13, + DP_FFE_PRESET_LEVEL14, + DP_FFE_PRESET_LEVEL15, + DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15, +}; +#endif + enum dc_dp_training_pattern { DP_TRAINING_PATTERN_SEQUENCE_1 = 0, DP_TRAINING_PATTERN_SEQUENCE_2, DP_TRAINING_PATTERN_SEQUENCE_3, DP_TRAINING_PATTERN_SEQUENCE_4, DP_TRAINING_PATTERN_VIDEOIDLE, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_128b_132b_TPS1, + DP_128b_132b_TPS2, + DP_128b_132b_TPS2_CDS, +#endif }; enum dp_link_encoding { DP_UNKNOWN_ENCODING = 0, DP_8b_10b_ENCODING = 1, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_128b_132b_ENCODING = 2, +#endif }; struct dc_link_settings { @@ -112,21 +152,35 @@ struct dc_link_settings { bool dpcd_source_device_specific_field_support; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +union dc_dp_ffe_preset { + struct { + uint8_t level : 4; + uint8_t reserved : 1; + uint8_t no_preshoot : 1; + uint8_t no_deemphasis : 1; + uint8_t method2 : 1; + } settings; + uint8_t raw; +}; +#endif + struct dc_lane_settings { enum dc_voltage_swing VOLTAGE_SWING; enum dc_pre_emphasis PRE_EMPHASIS; enum dc_post_cursor2 POST_CURSOR2; -}; - -struct dc_link_training_settings { - struct dc_link_settings link; - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dc_dp_ffe_preset FFE_PRESET; 
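/*
 * Worth calling out: after this hunk, enum dc_link_rate mixes two unit
 * systems. The legacy entries are DPCD link-bw codes in units of 0.27 Gb/s
 * per lane (0x14 = HBR2 = 5.4 Gb/s), while the UHBR entries are literal
 * rates in 10 Mb/s units (1000 = 10 Gb/s), as the comment says. Also note
 * the trailing comment on LINK_RATE_UHBR20 reads "UHBR10"; the value 2000,
 * i.e. 20.0 Gb/s, is the meaningful part. A sketch of the conversion any
 * consumer of the enum now needs (illustrative; DC has its own helpers):
 */
static unsigned int link_rate_to_kbps_per_lane(unsigned int rate)
{
	if (rate >= 1000)		/* DP 2.0 style: 10 Mb/s units  */
		return rate * 10000u;
	return rate * 270000u;		/* 8b/10b style: 0.27 Gb/s code */
}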
+#endif }; struct dc_link_training_overrides { enum dc_voltage_swing *voltage_swing; enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dc_dp_ffe_preset *ffe_preset; +#endif uint16_t *cr_pattern_time; uint16_t *eq_pattern_time; @@ -140,6 +194,16 @@ struct dc_link_training_overrides { bool *fec_enable; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +union payload_table_update_status { + struct { + uint8_t VC_PAYLOAD_TABLE_UPDATED:1; + uint8_t ACT_HANDLED:1; + } bits; + uint8_t raw; +}; +#endif + union dpcd_rev { struct { uint8_t MINOR:4; @@ -227,7 +291,14 @@ union lane_align_status_updated { struct { uint8_t INTERLANE_ALIGN_DONE:1; uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1; + uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1; + uint8_t LT_FAILED_128b_132b:1; + uint8_t RESERVED:1; +#else uint8_t RESERVED:4; +#endif uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1; uint8_t LINK_STATUS_UPDATED:1; } bits; @@ -240,6 +311,12 @@ union lane_adjust { uint8_t PRE_EMPHASIS_LANE:2; uint8_t RESERVED:4; } bits; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct { + uint8_t PRESET_VALUE :4; + uint8_t RESERVED :4; + } tx_ffe; +#endif uint8_t raw; }; @@ -269,6 +346,12 @@ union dpcd_training_lane { uint8_t MAX_PRE_EMPHASIS_REACHED:1; uint8_t RESERVED:2; } bits; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct { + uint8_t PRESET_VALUE :4; + uint8_t RESERVED :4; + } tx_ffe; +#endif uint8_t raw; }; @@ -551,12 +634,18 @@ union test_response { union phy_test_pattern { struct { +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* This field is 7 bits for DP2.0 */ + uint8_t PATTERN :7; + uint8_t RESERVED :1; +#else /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 * and 3 bits for DP1.2. */ uint8_t PATTERN :3; /* BY speci, bit7:2 is 0 for DP1.1. */ uint8_t RESERVED :5; +#endif } bits; uint8_t raw; }; @@ -634,7 +723,14 @@ union dpcd_fec_capability { uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t BIT_ERROR_COUNT_CAPABLE:1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1; + uint8_t ARITY_BIT_ERROR_COUNT_CAPABLE:1; + uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1; + uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1; +#else uint8_t RESERVED:4; +#endif } bits; uint8_t raw; }; @@ -758,4 +854,203 @@ struct psr_caps { bool psr_exit_link_training_required; }; +/* Length of router topology ID read from DPCD in bytes. */ +#define DPCD_USB4_TOPOLOGY_ID_LEN 5 + +/* DPCD[0xE000D] DP_TUNNELING_CAPABILITIES SUPPORT register. */ +union dp_tun_cap_support { + struct { + uint8_t dp_tunneling :1; + uint8_t rsvd :5; + uint8_t panel_replay_tun_opt :1; + uint8_t dpia_bw_alloc :1; + } bits; + uint8_t raw; +}; + +/* DPCD[0xE000E] DP_IN_ADAPTER_INFO register. 
*/ +union dpia_info { + struct { + uint8_t dpia_num :5; + uint8_t rsvd :3; + } bits; + uint8_t raw; +}; + +/* DP Tunneling over USB4 */ +struct dpcd_usb4_dp_tunneling_info { + union dp_tun_cap_support dp_tun_cap; + union dpia_info dpia_info; + uint8_t usb4_driver_id; + uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; +}; + +#if defined(CONFIG_DRM_AMD_DC_DCN) +#ifndef DP_MAIN_LINK_CHANNEL_CODING_CAP +#define DP_MAIN_LINK_CHANNEL_CODING_CAP 0x006 +#endif +#ifndef DP_SINK_VIDEO_FALLBACK_FORMATS +#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 +#endif +#ifndef DP_FEC_CAPABILITY_1 +#define DP_FEC_CAPABILITY_1 0x091 +#endif +#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT +#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 +#endif +#ifndef DP_LINK_SQUARE_PATTERN +#define DP_LINK_SQUARE_PATTERN 0x10F +#endif +#ifndef DP_DSC_CONFIGURATION +#define DP_DSC_CONFIGURATION 0x161 +#endif +#ifndef DP_PHY_SQUARE_PATTERN +#define DP_PHY_SQUARE_PATTERN 0x249 +#endif +#ifndef DP_128b_132b_SUPPORTED_LINK_RATES +#define DP_128b_132b_SUPPORTED_LINK_RATES 0x2215 +#endif +#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL +#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216 +#endif +#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0 +#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230 +#endif +#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256 +#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250 +#endif +#ifndef DP_DSC_SUPPORT_AND_DECODER_COUNT +#define DP_DSC_SUPPORT_AND_DECODER_COUNT 0x2260 +#endif +#ifndef DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 +#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 +#endif +#ifndef DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK +#define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0) +#endif +#ifndef DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK +#define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1) +#endif +#ifndef DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT +#define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1 +#endif +#ifndef DP_DSC_DECODER_COUNT_MASK +#define DP_DSC_DECODER_COUNT_MASK (0b111 << 5) +#endif +#ifndef DP_DSC_DECODER_COUNT_SHIFT +#define DP_DSC_DECODER_COUNT_SHIFT 5 +#endif +#ifndef DP_MAIN_LINK_CHANNEL_CODING_SET +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +#endif +#ifndef DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER +#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xF0006 +#endif +#ifndef DP_PHY_REPEATER_128b_132b_RATES +#define DP_PHY_REPEATER_128b_132b_RATES 0xF0007 +#endif +#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xF0022 +#endif +#ifndef DP_INTRA_HOP_AUX_REPLY_INDICATION +#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) +#endif +/* TODO - Use DRM header to replace above once available */ + +union dp_main_line_channel_coding_cap { + struct { + uint8_t DP_8b_10b_SUPPORTED :1; + uint8_t DP_128b_132b_SUPPORTED :1; + uint8_t RESERVED :6; + } bits; + uint8_t raw; +}; + +union dp_main_link_channel_coding_lttpr_cap { + struct { + uint8_t DP_128b_132b_SUPPORTED :1; + uint8_t RESERVED :7; + } bits; + uint8_t raw; +}; + +union dp_128b_132b_supported_link_rates { + struct { + uint8_t UHBR10 :1; + uint8_t UHBR20 :1; + uint8_t UHBR13_5:1; + uint8_t RESERVED:5; + } bits; + uint8_t raw; +}; + +union dp_128b_132b_supported_lttpr_link_rates { + struct { + uint8_t UHBR10 :1; + uint8_t UHBR13_5:1; + uint8_t UHBR20 :1; + uint8_t RESERVED:5; + } bits; + uint8_t raw; +}; + +union dp_sink_video_fallback_formats { + struct { + uint8_t dp_1024x768_60Hz_24bpp_support :1; + uint8_t dp_1280x720_60Hz_24bpp_support :1; + 
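/*
 * The long run of #ifndef/#define pairs above is deliberate, not clutter:
 * these DP 2.0 DPCD addresses were not yet in include/drm/drm_dp_helper.h
 * when this landed (hence the closing TODO), and guarding each definition
 * lets this header coexist with a future or backported DRM definition
 * without redefinition warnings. The shape of the pattern, shown for one
 * hypothetical register:
 */
#ifndef DP_EXAMPLE_REGISTER		/* DRM core defines it? use theirs */
#define DP_EXAMPLE_REGISTER 0x2345	/* otherwise provide a local value */
#endif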
uint8_t dp_1920x1080_60Hz_24bpp_support :1; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +union dp_fec_capability1 { + struct { + uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1; + uint8_t RESERVED :7; + } bits; + uint8_t raw; +}; + +struct dp_color_depth_caps { + uint8_t support_6bpc :1; + uint8_t support_8bpc :1; + uint8_t support_10bpc :1; + uint8_t support_12bpc :1; + uint8_t support_16bpc :1; + uint8_t RESERVED :3; +}; + +struct dp_encoding_format_caps { + uint8_t support_rgb :1; + uint8_t support_ycbcr444:1; + uint8_t support_ycbcr422:1; + uint8_t support_ycbcr420:1; + uint8_t RESERVED :4; +}; + +union dp_dfp_cap_ext { + struct { + uint8_t supported; + uint8_t max_pixel_rate_in_mps[2]; + uint8_t max_video_h_active_width[2]; + uint8_t max_video_v_active_height[2]; + struct dp_encoding_format_caps encoding_format_caps; + struct dp_color_depth_caps rgb_color_depth_caps; + struct dp_color_depth_caps ycbcr444_color_depth_caps; + struct dp_color_depth_caps ycbcr422_color_depth_caps; + struct dp_color_depth_caps ycbcr420_color_depth_caps; + } fields; + uint8_t raw[12]; +}; + +union dp_128b_132b_training_aux_rd_interval { + struct { + uint8_t VALUE :7; + uint8_t UNIT :1; + } bits; + uint8_t raw; +}; +#endif + #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 16cc76ce3739..684713b2cff7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -51,7 +51,6 @@ struct dc_dsc_policy { int min_slice_height; // Must not be less than 8 uint32_t max_target_bpp; uint32_t min_target_bpp; - uint32_t preferred_bpp_x16; bool enable_dsc_when_not_needed; }; @@ -81,6 +80,16 @@ bool dc_dsc_compute_config( uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp); +uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( + const struct dc_crtc_timing *timing, + const int num_slices_h, + const bool is_dp); + +/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM, + * and DM can choose to OVERRIDE the limitation on CASE BY CASE basis. + * Hardware/specs limitation should not be writable by DM. + * It should be decoupled from DM specific policy and named differently. + */ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy); diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 83845d006c54..37af564c4b33 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -45,6 +45,10 @@ struct dc_link_status { struct link_mst_stream_allocation { /* DIG front */ const struct stream_encoder *stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* HPO DP Stream Encoder */ + const struct hpo_dp_stream_encoder *hpo_dp_stream_enc; +#endif /* associate DRM payload table with DC stream encoder */ uint8_t vcp_id; /* number of slots required for the DP stream in transport packet */ @@ -81,6 +85,7 @@ struct psr_settings { */ bool psr_frame_capture_indication_req; unsigned int psr_sdp_transmit_line_num_deadline; + unsigned int psr_power_opt; }; /* @@ -108,6 +113,7 @@ struct dc_link { * DIG encoder. */ bool is_dig_mapping_flexible; bool hpd_status; /* HPD status of link without physical HPD pin. 
*/ + bool is_hpd_pending; /* Indicates a new received hpd */ bool edp_sink_present; @@ -117,8 +123,12 @@ struct dc_link { struct dc_link_settings reported_link_cap; struct dc_link_settings verified_link_cap; struct dc_link_settings cur_link_settings; - struct dc_lane_settings cur_lane_setting; + struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX]; struct dc_link_settings preferred_link_setting; + /* preferred_training_settings are override values that + * come from DM. DM is responsible for the memory + * management of the override pointers. + */ struct dc_link_training_overrides preferred_training_settings; struct dp_audio_test_data audio_test_data; @@ -150,6 +160,9 @@ struct dc_link { struct panel_cntl *panel_cntl; struct link_encoder *link_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_link_encoder *hpo_dp_link_enc; +#endif struct graphics_object_id link_id; /* Endpoint type distinguishes display endpoints which do not have entries * in the BIOS connector table from those that do. Helps when tracking link @@ -170,11 +183,17 @@ struct dc_link { struct psr_settings psr_settings; + /* Drive settings read from integrated info table */ + struct dc_lane_settings bios_forced_drive_settings; + /* MST record stream using this link */ struct link_flags { bool dp_keep_receiver_powered; bool dp_skip_DID2; bool dp_skip_reset_segment; + bool dp_mot_reset_segment; + /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ + bool dpia_mst_dsc_always_on; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -208,6 +227,8 @@ static inline void get_edp_links(const struct dc *dc, *edp_num = 0; for (i = 0; i < dc->link_count; i++) { // report any eDP links, even unconnected DDI's + if (!dc->links[i]) + continue; if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) { edp_links[*edp_num] = dc->links[i]; if (++(*edp_num) == MAX_NUM_EDP) @@ -260,8 +281,8 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link); int dc_link_get_target_backlight_pwm(const struct dc_link *link); -bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, - bool wait, bool force_static); +bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable, + bool wait, bool force_static, const unsigned int *power_opts); bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state); @@ -271,6 +292,10 @@ bool dc_link_setup_psr(struct dc_link *dc_link, void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency); +void dc_link_blank_all_dp_displays(struct dc *dc); + +void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init); + /* Request DC to detect if there is a Panel connected. * boot - If this call is during initial boot. * Return false for any type of detection failure or MST detection @@ -282,12 +307,16 @@ enum dc_detect_reason { DETECT_REASON_HPD, DETECT_REASON_HPDRX, DETECT_REASON_FALLBACK, - DETECT_REASON_RETRAIN + DETECT_REASON_RETRAIN, }; bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +#endif /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 
* Return: @@ -296,7 +325,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); * false - no change in Downstream port status. No further action required * from DM. */ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link, - union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss); + union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss, + bool defer_handling, bool *has_left_work); /* * On eDP links this function call will stall until T12 has elapsed. @@ -305,9 +335,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link, */ bool dc_link_wait_for_t12(struct dc_link *link); -enum dc_status read_hpd_rx_irq_data( - struct dc_link *link, - union hpd_irq_data *irq_data); +void dc_link_dp_handle_automated_test(struct dc_link *link); +void dc_link_dp_handle_link_loss(struct dc_link *link); +bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link); struct dc_sink_init_data; @@ -416,4 +446,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing( bool dc_link_is_fec_supported(const struct dc_link *link); bool dc_link_should_enable_fec(const struct dc_link *link); +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); +#endif #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index b8ebc1f09538..e37c4a10bfd5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -115,6 +115,13 @@ struct periodic_interrupt_config { int lines_offset; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct dc_mst_stream_bw_update { + bool is_increase; // is bandwidth reduced or increased + uint32_t mst_stream_bw; // new mst bandwidth in kbps +}; +#endif + union stream_update_flags { struct { uint32_t scaling:1; @@ -125,6 +132,9 @@ union stream_update_flags { uint32_t gamut_remap:1; uint32_t wb_update:1; uint32_t dsc_changed : 1; +#if defined(CONFIG_DRM_AMD_DC_DCN) + uint32_t mst_bw : 1; +#endif } bits; uint32_t raw; @@ -278,6 +288,9 @@ struct dc_stream_update { struct dc_writeback_update *wb_update; struct dc_dsc_config *dsc_config; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_mst_stream_bw_update *mst_bw_update; +#endif struct dc_transfer_func *func_shaper; struct dc_3dlut *lut3d_func; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index c1532930169b..388457ffc0a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -395,9 +395,27 @@ struct dc_lttpr_caps { uint8_t max_link_rate; uint8_t phy_repeater_cnt; uint8_t max_ext_timeout; +#if defined(CONFIG_DRM_AMD_DC_DCN) + union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; + union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; +#endif uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct dc_dongle_dfp_cap_ext { + bool supported; + uint16_t max_pixel_rate_in_mps; + uint16_t max_video_h_active_width; + uint16_t max_video_v_active_height; + struct dp_encoding_format_caps encoding_format_caps; + struct dp_color_depth_caps rgb_color_depth_caps; + struct dp_color_depth_caps ycbcr444_color_depth_caps; + struct dp_color_depth_caps ycbcr422_color_depth_caps; + struct dp_color_depth_caps ycbcr420_color_depth_caps; +}; +#endif + struct dc_dongle_caps { /* dongle type (DP converter, CV smart dongle) */ enum display_dongle_type dongle_type; @@ -411,6 +429,9 @@ struct dc_dongle_caps { 
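/*
 * This dc_dongle_dfp_cap_ext struct is the decoded counterpart of the raw
 * union dp_dfp_cap_ext earlier in the patch, which mirrors the 12-byte DPCD
 * block byte-for-byte: uint8_t pairs there become uint16_t fields here.
 * Assuming the usual DPCD convention that multi-byte fields are
 * little-endian (LSB at the lower address), the parse step looks like this
 * sketch:
 */
#include <stdint.h>

static uint16_t dpcd_u16(const uint8_t b[2])
{
	return (uint16_t)(b[0] | (b[1] << 8));
}

/* e.g.:  caps.max_video_h_active_width =
 *                dpcd_u16(raw.fields.max_video_h_active_width);
 */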
bool is_dp_hdmi_ycbcr420_converter; uint32_t dp_hdmi_max_bpc; uint32_t dp_hdmi_max_pixel_clk_in_khz; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dc_dongle_dfp_cap_ext dfp_cap_ext; +#endif }; /* Scaling format */ enum scaling_transformation { @@ -632,6 +653,7 @@ enum dc_psr_state { PSR_STATE1a, PSR_STATE2, PSR_STATE2a, + PSR_STATE2b, PSR_STATE3, PSR_STATE3Init, PSR_STATE4, @@ -934,6 +956,7 @@ enum dc_psr_version { /* Possible values of display_endpoint_id.endpoint */ enum display_endpoint_type { DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */ + DISPLAY_ENDPOINT_USB4_DPIA, /* USB4 DisplayPort tunnel. */ DISPLAY_ENDPOINT_UNKNOWN = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h index 456fadbbfac7..b699d1b2ba83 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h @@ -96,6 +96,22 @@ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ NBIO_SR(BIOS_SCRATCH_2) +#define ABM_DCN302_REG_LIST(id)\ + ABM_COMMON_REG_LIST_DCE_BASE(), \ + SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \ + SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \ + SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \ + SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \ + SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \ + SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \ + SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \ + SRI(BL1_PWM_USER_LEVEL, ABM, id), \ + SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \ + SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \ + SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \ + SRI(DC_ABM1_ACE_THRES_12, ABM, id), \ + NBIO_SR(BIOS_SCRATCH_2) + #define ABM_DCN30_REG_LIST(id)\ ABM_COMMON_REG_LIST_DCE_BASE(), \ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 7866cf2a668f..70eaac017624 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -67,9 +67,6 @@ static void write_indirect_azalia_reg(struct audio *audio, /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0, AZALIA_ENDPOINT_REG_DATA, reg_data); - - DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n", - reg_index, reg_data); } static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index) @@ -85,9 +82,6 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA); - DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n", - reg_index, value); - return value; } @@ -514,13 +508,15 @@ void dce_aud_az_configure( union audio_sample_rates sample_rates = audio_mode->sample_rates; uint8_t byte2 = audio_mode->max_bit_rate; + uint8_t channel_count = audio_mode->channel_count; /* adjust specific properties */ switch (audio_format_code) { case AUDIO_FORMAT_CODE_LINEARPCM: { + check_audio_bandwidth( crtc_info, - audio_mode->channel_count, + channel_count, signal, &sample_rates); @@ -548,7 +544,7 @@ void dce_aud_az_configure( /* fill audio format data */ set_reg_field_value(value, - audio_mode->channel_count - 1, + channel_count - 1, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, MAX_CHANNELS); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index 5622d5e32d81..dbd2cfed0603 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ 
b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -113,6 +113,7 @@ struct dce_audio_shift { uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; + uint32_t CLOCK_GATING_DISABLE; }; struct dce_audio_mask { @@ -132,6 +133,7 @@ struct dce_audio_mask { uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; + uint32_t CLOCK_GATING_DISABLE; }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index e14f99b4b0c3..6d42a9cc9916 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -42,7 +42,7 @@ #define DC_LOGGER \ engine->ctx->logger -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ +#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0) #define IS_DC_I2CAUX_LOGGING_ENABLED() (false) #define LOG_FLAG_Error_I2cAux LOG_ERROR #define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX @@ -76,7 +76,7 @@ enum { #define DEFAULT_AUX_ENGINE_MULT 0 #define DEFAULT_AUX_ENGINE_LENGTH 69 -#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ +#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0) static void release_engine( struct dce_aux *engine) @@ -534,17 +534,26 @@ struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload) { if (payload->i2c_over_aux) { + if (payload->write_status_update) { + if (payload->mot) + return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT; + else + return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST; + } if (payload->write) { if (payload->mot) return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT; - return I2CAUX_TRANSACTION_ACTION_I2C_WRITE; + else + return I2CAUX_TRANSACTION_ACTION_I2C_WRITE; } if (payload->mot) return I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT; + return I2CAUX_TRANSACTION_ACTION_I2C_READ; } if (payload->write) return I2CAUX_TRANSACTION_ACTION_DP_WRITE; + return I2CAUX_TRANSACTION_ACTION_DP_READ; } @@ -627,6 +636,7 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc, #define AUX_MAX_I2C_DEFER_RETRIES 7 #define AUX_MAX_INVALID_REPLY_RETRIES 2 #define AUX_MAX_TIMEOUT_RETRIES 3 +#define AUX_DEFER_DELAY_FOR_DPIA 4 /*ms*/ static void dce_aux_log_payload(const char *payload_name, unsigned char *payload, uint32_t length, uint32_t max_length_to_log) @@ -689,15 +699,21 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, enum aux_return_code_type operation_result; bool retry_on_defer = false; struct ddc *ddc_pin = ddc->ddc_pin; - struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; - struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); + struct dce_aux *aux_engine = NULL; + struct aux_engine_dce110 *aux110 = NULL; uint32_t defer_time_in_ms = 0; int aux_ack_retries = 0, aux_defer_retries = 0, aux_i2c_defer_retries = 0, aux_timeout_retries = 0, - aux_invalid_reply_retries = 0; + aux_invalid_reply_retries = 0, + aux_ack_m_retries = 0; + + if (ddc_pin) { + aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + aux110 = FROM_AUX_ENGINE(aux_engine); + } if (!payload->reply) { payload_reply = false; @@ -752,9 +768,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, aux_defer_retries, AUX_MAX_RETRIES); goto fail; - } else { + } else udelay(300); + } else if (payload->write && ret > 0) { + /* sink requested more time to complete the write via AUX_ACKM */ + 
if (++aux_ack_m_retries >= AUX_MAX_RETRIES) { + DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR, + LOG_FLAG_Error_I2cAux, + "dce_aux_transfer_with_retries: FAILURE: aux_ack_m_retries=%d >= AUX_MAX_RETRIES=%d", + aux_ack_m_retries, + AUX_MAX_RETRIES); + goto fail; } + + /* retry reading the write status until complete + * NOTE: payload is modified here + */ + payload->write = false; + payload->write_status_update = true; + payload->length = 0; + udelay(300); + } else return true; break; @@ -765,7 +799,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, "dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER"); /* polling_timeout_period is in us */ - defer_time_in_ms += aux110->polling_timeout_period / 1000; + if (aux110) + defer_time_in_ms += aux110->polling_timeout_period / 1000; + else + defer_time_in_ms += AUX_DEFER_DELAY_FOR_DPIA; ++aux_defer_retries; fallthrough; case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 3139285bd403..692fa23ca02b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -76,6 +76,15 @@ SRII(PIXEL_RATE_CNTL, OTG, 4),\ SRII(PIXEL_RATE_CNTL, OTG, 5) +#define CS_COMMON_REG_LIST_DCN201(index, pllid) \ + SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ + SRII(PHASE, DP_DTO, 0),\ + SRII(PHASE, DP_DTO, 1),\ + SRII(MODULO, DP_DTO, 0),\ + SRII(MODULO, DP_DTO, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PIXEL_RATE_CNTL, OTG, 1) + #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRII(PHASE, DP_DTO, 0),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 0464a8f3db3c..a3fee929cd12 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -70,6 +70,10 @@ SRII(PIXEL_RATE_CNTL, blk, 4), \ SRII(PIXEL_RATE_CNTL, blk, 5) +#define HWSEQ_PIXEL_RATE_REG_LIST_201(blk) \ + SRII(PIXEL_RATE_CNTL, blk, 0), \ + SRII(PIXEL_RATE_CNTL, blk, 1) + #define HWSEQ_PHYPLL_REG_LIST(blk) \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \ @@ -94,6 +98,10 @@ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5) +#define HWSEQ_PHYPLL_REG_LIST_201(blk) \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) + #define HWSEQ_DCE11_REG_LIST_BASE() \ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \ SR(DCFEV_CLOCK_CONTROL), \ @@ -337,6 +345,29 @@ SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL) +#define HWSEQ_DCN201_REG_LIST()\ + HWSEQ_DCN_REG_LIST(), \ + HWSEQ_PIXEL_RATE_REG_LIST_201(OTG), \ + HWSEQ_PHYPLL_REG_LIST_201(OTG), \ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + MMHUB_SR(MC_VM_FB_LOCATION_BASE), \ + MMHUB_SR(MC_VM_FB_LOCATION_TOP), \ + MMHUB_SR(MC_VM_FB_OFFSET) + #define HWSEQ_DCN30_REG_LIST()\ HWSEQ_DCN2_REG_LIST(),\ HWSEQ_DCN_REG_LIST(), \ @@ -637,6 +668,10 @@ struct dce_hwseq_registers { uint32_t DMU_MEM_PWR_CNTL; uint32_t MMHUBBUB_MEM_PWR_CNTL; 
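/*
 * The AUX_ACKM branch above deserves a gloss: when a sink answers an
 * I2C-over-AUX write with an ACK that reports only a partial byte count
 * (ret > 0 here), the source is expected to poll with zero-length
 * "write status update" requests until the sink finishes, under the same
 * bounded retry budget. A condensed model of that state flip (variable
 * names are illustrative, not the driver's):
 */
	if (is_write && bytes_left > 0) {
		if (++ack_m_retries >= AUX_MAX_RETRIES)
			goto fail;			/* sink never finished      */
		is_write = false;			/* switch the request type   */
		is_write_status_update = true;		/* ...to WRITE_STATUS_UPDATE */
		request_length = 0;			/* no payload, just polling  */
		/* wait ~300 us, then reissue the transaction */
	}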
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; + uint32_t MC_VM_FB_LOCATION_BASE; + uint32_t MC_VM_FB_LOCATION_TOP; + uint32_t MC_VM_FB_OFFSET; + uint32_t HPO_TOP_HW_CONTROL; }; /* set field name */ #define HWS_SF(blk_name, reg_name, field_name, post_fix)\ @@ -872,6 +907,11 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh) +#define HWSEQ_DCN201_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh) + #define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\ HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ @@ -1112,7 +1152,9 @@ struct dce_hwseq_registers { type DOMAIN_POWER_GATE;\ type DOMAIN_PGFSM_PWR_STATUS;\ type HPO_HDMISTREAMCLK_G_GATE_DIS;\ - type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE; + type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\ + type I2C_LIGHT_SLEEP_FORCE;\ + type HPO_IO_EN; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index e92339235863..e8570060d007 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -49,7 +49,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) { uint64_t current_backlight; - uint32_t round_result; uint32_t bl_period, bl_int_count; uint32_t bl_pwm, fractional_duty_cycle_en; uint32_t bl_period_mask, bl_pwm_mask; @@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c current_backlight = div_u64(current_backlight, bl_period); current_backlight = (current_backlight + 1) >> 1; - current_backlight = (uint64_t)(current_backlight) * bl_period; - - round_result = (uint32_t)(current_backlight & 0xFFFFFFFF); - - round_result = (round_result >> (bl_int_count-1)) & 1; - - current_backlight >>= bl_int_count; - current_backlight += round_result; - return (uint32_t)(current_backlight); } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 8d4263da59f2..779bc92a2968 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -919,6 +919,7 @@ static void dce110_stream_encoder_stop_dp_info_packets( } static void dce110_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); @@ -967,6 +968,7 @@ static void dce110_stream_encoder_dp_blank( /* output video stream to link encoder */ static void dce110_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index 54a1408c8015..fb0dec4ed3a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -203,12 +203,33 @@ static bool dmub_abm_init_config(struct abm *abm, return true; } +static bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = abm->ctx; + uint8_t panel_mask = 0x01 << panel_inst; + + memset(&cmd, 0, 
sizeof(cmd)); + cmd.abm_pause.header.type = DMUB_CMD__ABM; + cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE; + cmd.abm_pause.abm_pause_data.enable = pause; + cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; + cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); + + return true; +} + static const struct abm_funcs abm_funcs = { .abm_init = dmub_abm_init, .set_abm_level = dmub_abm_set_level, .get_current_backlight = dmub_abm_get_current_backlight, .get_target_backlight = dmub_abm_get_target_backlight, .init_abm_config = dmub_abm_init_config, + .set_abm_pause = dmub_abm_set_pause, }; static void dmub_abm_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 9baf8ca0a920..b1b2e3c6f379 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -56,8 +56,11 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd) { union dmub_inbox0_data_register data = { 0 }; + data.inbox0_cmd_lock_hw = hw_lock_cmd; + dc_dmub_srv_clear_inbox0_ack(dmub_srv); dc_dmub_srv_send_inbox0_cmd(dmub_srv, data); + dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } bool should_use_dmub_lock(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index aa8403bc4c83..87ed48d5530d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -50,6 +50,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state) state = PSR_STATE2; else if (raw_state == 0x21) state = PSR_STATE2a; + else if (raw_state == 0x22) + state = PSR_STATE2b; else if (raw_state == 0x30) state = PSR_STATE3; else if (raw_state == 0x31) @@ -225,6 +227,27 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ dc_dmub_srv_wait_idle(dc->dmub_srv); } +/** + * Set PSR power optimization flags. + */ +static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst) +{ + union dmub_rb_cmd cmd; + struct dc_context *dc = dmub->ctx; + + memset(&cmd, 0, sizeof(cmd)); + cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR; + cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT; + cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data); + cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; + cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; + cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; + + dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); + dc_dmub_srv_cmd_execute(dc->dmub_srv); + dc_dmub_srv_wait_idle(dc->dmub_srv); +} + /* * Setup PSR by programming phy registers and sending psr hw context values to firmware. 
*/ @@ -306,6 +329,16 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us; copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; copy_settings_data->panel_inst = panel_inst; + copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); + + if (link->fec_state == dc_link_fec_enabled && + (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, + sizeof(link->dpcd_caps.sink_dev_id_str)) || + !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, + sizeof(link->dpcd_caps.sink_dev_id_str)))) + copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1; + else + copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); dc_dmub_srv_cmd_execute(dc->dmub_srv); @@ -356,6 +389,7 @@ static const struct dmub_psr_funcs psr_funcs = { .psr_set_level = dmub_psr_set_level, .psr_force_static = dmub_psr_force_static, .psr_get_residency = dmub_psr_get_residency, + .psr_set_power_opt = dmub_psr_set_power_opt, }; /* diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h index 9675c269e649..01acc01cc191 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -46,6 +46,7 @@ struct dmub_psr_funcs { void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst); void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst); + void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst); }; struct dmub_psr *dmub_psr_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 62d595ded866..3d421583e9ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -46,6 +46,7 @@ #include "transform.h" #include "stream_encoder.h" #include "link_encoder.h" +#include "link_enc_cfg.h" #include "link_hwss.h" #include "dc_link_dp.h" #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -57,7 +58,8 @@ #include "audio.h" #include "reg_helper.h" #include "panel_cntl.h" - +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" /* include DCE11 register header files */ #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_sh_mask.h" @@ -1108,11 +1110,23 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) clk_mgr->funcs->enable_pme_wa(clk_mgr); /* un-mute audio */ /* TODO: audio should be per stream rather than per link */ - pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.hpo_dp_stream_enc, false); + else + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, false); +#else + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.stream_enc, false); +#endif if (pipe_ctx->stream_res.audio) pipe_ctx->stream_res.audio->enabled = true; } + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM); } void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) @@ -1129,14 +1143,32 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if 
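
The copy-settings hunk above adds a sink-specific quirk: when FEC is enabled and the sink reports one of two known device ID strings, the PSR firmware is told to force wakeup via TPS3 instead of the default pattern. Isolated as a predicate (the helper name is ours; the ID macros are the ones used in the hunk):

/* Sketch: does this sink need the TPS3 wakeup workaround? */
static bool sink_needs_tps3_wakeup(const struct dc_link *link)
{
        return link->fec_state == dc_link_fec_enabled &&
               (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
                        sizeof(link->dpcd_caps.sink_dev_id_str)) ||
                !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
                        sizeof(link->dpcd_caps.sink_dev_id_str)));
}
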
(pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.hpo_dp_stream_enc, true); + else + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.stream_enc, true); +#else pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); +#endif if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( + pipe_ctx->stream_res.hpo_dp_stream_enc); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( + pipe_ctx->stream_res.stream_enc); +#else pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); +#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1151,6 +1183,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) * stream->stream_engine_id); */ } + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM); } void dce110_disable_stream(struct pipe_ctx *pipe_ctx) @@ -1158,6 +1193,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dc *dc = pipe_ctx->stream->ctx->dc; + struct link_encoder *link_enc = NULL; if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) { pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( @@ -1166,16 +1202,53 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets( + pipe_ctx->stream_res.hpo_dp_stream_enc); + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#else if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#endif pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.stream_enc); dc->hwss.disable_audio_stream(pipe_ctx); - link->link_enc->funcs->connect_dig_be_to_fe( + /* Link encoder may have been dynamically assigned to non-physical display endpoint. 
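
The audio mute and audio-disable hunks above introduce the dispatch pattern this series applies throughout dce110/dcn20: when a pipe carries a DP 2.0 128b/132b signal, the operation is routed to the HPO (high pixel rate output) stream encoder, otherwise to the legacy DIG stream encoder. Condensed, using the mute control as the example (the helper is a restatement, not new API):

/* Route a per-stream operation to the right encoder (sketch). */
static void set_audio_mute(struct pipe_ctx *pipe_ctx, bool mute)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (is_dp_128b_132b_signal(pipe_ctx)) {
                pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
                                pipe_ctx->stream_res.hpo_dp_stream_enc, mute);
                return;
        }
#endif
        pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                        pipe_ctx->stream_res.stream_enc, mute);
}
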
*/ + if (link->ep_type == DISPLAY_ENDPOINT_PHY) + link_enc = link->link_enc; + else if (dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + ASSERT(link_enc); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable( + pipe_ctx->stream_res.hpo_dp_stream_enc); + setup_dp_hpo_stream(pipe_ctx, false); + /* TODO - DP2.0 HW: unmap stream from link encoder here */ + } else { + if (link_enc) + link_enc->funcs->connect_dig_be_to_fe( + link_enc, + pipe_ctx->stream_res.stream_enc->id, + false); + } +#else + if (link_enc) + link_enc->funcs->connect_dig_be_to_fe( link->link_enc, pipe_ctx->stream_res.stream_enc->id, false); +#endif + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc->hwseq->funcs.setup_hpo_hw_control && is_dp_128b_132b_signal(pipe_ctx)) + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, false); +#endif } @@ -1192,7 +1265,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; if (dc_is_dp_signal(pipe_ctx->stream->signal)) - pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); + pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { hws->funcs.edp_backlight_control(link, true); @@ -1210,8 +1283,16 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) { + /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank( + pipe_ctx->stream_res.hpo_dp_stream_enc); + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { +#else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { - pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc); +#endif + pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); if (!dc_is_embedded_signal(pipe_ctx->stream->signal)) { /* @@ -1436,6 +1517,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct dc *dc) { struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; struct drr_params params = {0}; unsigned int event_triggers = 0; struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; @@ -1451,10 +1533,23 @@ static enum dc_status apply_single_controller_ctx_to_hw( build_audio_output(context, pipe_ctx, &audio_output); if (dc_is_dp_signal(pipe_ctx->stream->signal)) +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( + pipe_ctx->stream_res.hpo_dp_stream_enc, + pipe_ctx->stream_res.audio->inst, + &pipe_ctx->stream->audio_info); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.audio->inst, + &pipe_ctx->stream->audio_info); +#else pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.audio->inst, &pipe_ctx->stream->audio_info); +#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup( pipe_ctx->stream_res.stream_enc, @@ -1469,10 +1564,18 @@ static enum dc_status 
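
dce110_disable_stream() above can no longer assume link->link_enc is populated: with flexible DIG mapping the encoder may have been assigned at run time, so it is looked up through link_enc_cfg and NULL-checked before use. The lookup, pulled out on its own (the helper name is ours):

/* Resolve the link encoder for a possibly dynamically-assigned link. */
static struct link_encoder *resolve_link_enc(struct dc *dc,
                                             struct dc_link *link)
{
        if (link->ep_type == DISPLAY_ENDPOINT_PHY)
                return link->link_enc;
        if (dc->res_pool->funcs->link_encs_assign)
                return link_enc_cfg_get_link_enc_used_by_link(dc, link);
        return NULL;
}
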
apply_single_controller_ctx_to_hw( &pipe_ctx->stream->audio_info); } - /* */ - /* Do not touch stream timing on seamless boot optimization. */ - if (!pipe_ctx->stream->apply_seamless_boot_optimization) - hws->funcs.enable_stream_timing(pipe_ctx, context, dc); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. + * To do so, move calling function enable_stream_timing to only be done AFTER calling + * function core_link_enable_stream + */ + if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx))) +#endif + /* */ + /* Do not touch stream timing on seamless boot optimization. */ + if (!pipe_ctx->stream->apply_seamless_boot_optimization) + hws->funcs.enable_stream_timing(pipe_ctx, context, dc); if (hws->funcs.setup_vupdate_interrupt) hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); @@ -1499,6 +1602,14 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); + if (dc_is_dp_signal(pipe_ctx->stream->signal) && + pipe_ctx->stream_res.stream_enc->funcs->reset_fifo) + pipe_ctx->stream_res.stream_enc->funcs->reset_fifo( + pipe_ctx->stream_res.stream_enc); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, @@ -1526,6 +1637,18 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. + * To do so, move calling function enable_stream_timing to only be done AFTER calling + * function core_link_enable_stream + */ + if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) { + if (!pipe_ctx->stream->apply_seamless_boot_optimization) + hws->funcs.enable_stream_timing(pipe_ctx, context, dc); + } +#endif + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; @@ -1539,27 +1662,17 @@ static void power_down_encoders(struct dc *dc) { int i; - /* do not know BIOS back-front mapping, simply blank all. 
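
The two DCN3.1 FPGA workaround hunks above are a matched pair: for 128b/132b pipes, enabling the OTG timing is skipped at its usual position and replayed only after core_link_enable_stream(), because the HPO DP stream encoder must be running before OTG master enable is set. Reduced to its control flow (a sketch; the local variable name is ours):

bool defer_timing = hws->wa.dp_hpo_and_otg_sequence &&
                    is_dp_128b_132b_signal(pipe_ctx);

if (!defer_timing && !stream->apply_seamless_boot_optimization)
        hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
/* ... audio, OPP and link programming ... */
if (!stream->dpms_off)
        core_link_enable_stream(context, pipe_ctx);
if (defer_timing && !stream->apply_seamless_boot_optimization)
        hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
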
It will not - * hurt for non-DP - */ - for (i = 0; i < dc->res_pool->stream_enc_count; i++) { - dc->res_pool->stream_enc[i]->funcs->dp_blank( - dc->res_pool->stream_enc[i]); - } - for (i = 0; i < dc->link_count; i++) { enum signal_type signal = dc->links[i]->connector_signal; - if ((signal == SIGNAL_TYPE_EDP) || - (signal == SIGNAL_TYPE_DISPLAY_PORT)) - if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) - dp_receiver_power_ctrl(dc->links[i], false); + dc_link_blank_dp_stream(dc->links[i], false); if (signal != SIGNAL_TYPE_EDP) signal = SIGNAL_TYPE_NONE; - dc->links[i]->link_enc->funcs->disable_output( - dc->links[i]->link_enc, signal); + if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY) + dc->links[i]->link_enc->funcs->disable_output( + dc->links[i]->link_enc, signal); dc->links[i]->link_status.link_active = false; memset(&dc->links[i]->cur_link_settings, 0, @@ -1720,7 +1833,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) } } // We are trying to enable eDP, don't power down VDD - if (edp_stream_num) + if (edp_stream_num && can_apply_edp_fast_boot) keep_edp_vdd_on = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index cb9767ddf93d..e31a6f1516bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,6 +39,10 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 +#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 +#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 +#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 + #define REG(reg)\ dpp->tf_regs->reg @@ -205,9 +209,17 @@ static void dpp1_power_on_dscl( struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) { - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, power_on ? 
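
power_down_encoders() above now delegates the per-link blanking and receiver power handling to dc_link_blank_dp_stream(), and only calls disable_output() when the link owns a physical encoder, since a dynamically mapped endpoint may have none to disable. The new endpoint guard, isolated:

/* Only physical endpoints own a static link encoder to power down. */
if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY)
        dc->links[i]->link_enc->funcs->disable_output(
                        dc->links[i]->link_enc, signal);
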
0 : 3); - if (power_on) + if (power_on) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0); REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5); + } else { + if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) { + dpp->base.ctx->dc->optimized_required = true; + dpp->base.deferred_reg_writes.bits.disable_dscl = true; + } else { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + } + } } } @@ -677,9 +689,17 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { int visual_confirm_on = 0; + unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; + if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) visual_confirm_on = 1; + /* Check bounds to ensure the VC bar height was set to a sane value */ + if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && + (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { + visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; + } + REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -691,7 +711,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + 1)); + - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height)); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index df8a7718a85f..a2b925cc4132 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -231,7 +231,7 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) if (!s->blank_en) DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" - "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, @@ -466,6 +466,71 @@ void dcn10_log_hw_state(struct dc *dc, log_mpc_crc(dc, log_ctx); + { + int hpo_dp_link_enc_count = 0; + + if (pool->hpo_dp_stream_enc_count > 0) { + DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n"); + for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { + struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0}; + struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i]; + + if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) { + hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state); + + DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n", + hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0, + hpo_dp_se_state.stream_enc_enabled, + hpo_dp_se_state.otg_inst, + (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" : + ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" : + (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"), + (hpo_dp_se_state.component_depth == 0) ? 6 : + ((hpo_dp_se_state.component_depth == 1) ? 8 : + (hpo_dp_se_state.component_depth == 2) ? 
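
dpp1_dscl_set_recout() above shortens RECOUT_HEIGHT by a per-pipe multiple of the visual-confirm bar height, so the new debug knob is range-checked and falls back to the default when out of bounds. The clamp as a standalone helper (the helper name is ours):

/* Validate the requested bar height; fall back to the default. */
static unsigned short vc_rect_height(int requested)
{
        if (requested >= VISUAL_CONFIRM_RECT_HEIGHT_MIN &&
            requested <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)
                return requested;
        return VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
}
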
10 : 12), + hpo_dp_se_state.vid_stream_enabled, + hpo_dp_se_state.sdp_enabled, + hpo_dp_se_state.compressed_format, + hpo_dp_se_state.mapped_to_link_enc); + } + } + + DTN_INFO("\n"); + } + + /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */ + for (i = 0; i < dc->link_count; i++) + if (dc->links[i]->hpo_dp_link_enc) + hpo_dp_link_enc_count++; + + if (hpo_dp_link_enc_count) { + DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n"); + + for (i = 0; i < dc->link_count; i++) { + struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc; + struct hpo_dp_link_enc_state hpo_dp_le_state = {0}; + + if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) { + hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state); + DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n", + hpo_dp_link_enc->inst, + hpo_dp_le_state.link_enc_enabled, + (hpo_dp_le_state.link_mode == 0) ? "TPS1" : + (hpo_dp_le_state.link_mode == 1) ? "TPS2" : + (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST", + hpo_dp_le_state.lane_count, + hpo_dp_le_state.stream_src[0], + hpo_dp_le_state.slot_count[0], + hpo_dp_le_state.vc_rate_x[0], + hpo_dp_le_state.vc_rate_y[0]); + DTN_INFO("\n"); + } + } + + DTN_INFO("\n"); + } + } + DTN_INFO_END(); } @@ -1297,11 +1362,48 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + + /* Power gate DSCs */ + if (hws->funcs.dsc_pg_control != NULL) { + uint32_t num_opps = 0; + uint32_t opp_id_src0 = OPP_ID_INVALID; + uint32_t opp_id_src1 = OPP_ID_INVALID; + + // Step 1: To find out which OPTC is running & OPTC DSC is ON + for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) { + uint32_t optc_dsc_state = 0; + struct timing_generator *tg = dc->res_pool->timing_generators[i]; + + if (tg->funcs->is_tg_enabled(tg)) { + if (tg->funcs->get_dsc_status) + tg->funcs->get_dsc_status(tg, &optc_dsc_state); + // Only one OPTC with DSC is ON, so if we got one result, we would exit this block. + // non-zero value is DSC enabled + if (optc_dsc_state != 0) { + tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); + break; + } + } + } + + // Step 2: To power down DSC but skip DSC of running OPTC + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { + struct dcn_dsc_state s = {0}; + + dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); + + if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && + s.dsc_clock_en && s.dsc_fw_en) + continue; + + hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false); + } + } } void dcn10_init_hw(struct dc *dc) { - int i, j; + int i; struct abm *abm = dc->res_pool->abm; struct dmcu *dmcu = dc->res_pool->dmcu; struct dce_hwseq *hws = dc->hwseq; @@ -1313,6 +1415,12 @@ void dcn10_init_hw(struct dc *dc) if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); + /* Align bw context with hw config when system resume. 
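
The DSC power-gating block added to dcn10_init_pipes() above is a two-pass scan: pass one finds the enabled OPTC whose DSC mode is active and records its OPP sources; pass two gates every DSC instance except one that is clocked, forwarding, and attached to those OPPs. The skip condition, isolated (the helper name is ours):

/* Keep a DSC powered only if it is live and feeds the active OPTC. */
static bool dsc_must_stay_on(const struct dcn_dsc_state *s,
                             uint32_t opp_id_src0, uint32_t opp_id_src1)
{
        return (s->dsc_opp_source == opp_id_src0 ||
                s->dsc_opp_source == opp_id_src1) &&
               s->dsc_clock_en && s->dsc_fw_en;
}
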
*/ + if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) { + dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz; + dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz; + } + // Initialize the dccg if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init) dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg); @@ -1397,43 +1505,8 @@ void dcn10_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - - /* DP 2.0 requires that LTTPR Caps be read first */ - dp_retrieve_lttpr_cap(dc->links[i]); - - /* - * If any of the displays are lit up turn them off. - * The reason is that some MST hubs cannot be turned off - * completely until we tell them to do so. - * If not turned off, then displays connected to MST hub - * won't light up. - */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc); - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank( - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1522,7 +1595,7 @@ void dcn10_power_down_on_boot(struct dc *dc) for (i = 0; i < dc->link_count; i++) { struct dc_link *link = dc->links[i]; - if (link->link_enc->funcs->is_dig_enabled && + if (link->link_enc && link->link_enc->funcs->is_dig_enabled && link->link_enc->funcs->is_dig_enabled(link->link_enc) && dc->hwss.power_down) { dc->hwss.power_down(dc); @@ -1566,7 +1639,7 @@ void dcn10_reset_hw_ctx_wrap( dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -2274,8 +2347,8 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1, void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); - struct vm_system_aperture_param apt = { {{ 0 } } }; - struct vm_context0_param vm0 = { { { 0 } } }; + struct vm_system_aperture_param apt = {0}; + struct vm_context0_param vm0 = {0}; mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws); mmhub_read_vm_context0_settings(hubp1, &vm0, hws); @@ -2448,7 +2521,7 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; - struct mpcc_blnd_cfg blnd_cfg = {{0}}; + struct mpcc_blnd_cfg blnd_cfg = {0}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && 
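
dcn10_init_hw() above replaces roughly forty lines of open-coded DPCD reads and front-end blanking with a single dc_link_blank_all_dp_displays() call. Judging from the call sites in this diff, the helper walks every DP link and blanks it through dc_link_blank_dp_stream(); the boolean appears to distinguish this boot/init path from the encoder power-down path seen earlier. A sketch of the expected shape, under that assumption:

/* Blank every DP display; simplified sketch, not the core helper. */
for (i = 0; i < dc->link_count; i++) {
        if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
                continue;
        dc_link_blank_dp_stream(dc->links[i], true /* init-time path */);
}
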
pipe_ctx->bottom_pipe; int mpcc_id; struct mpcc *new_mpcc; @@ -2553,7 +2626,7 @@ static void dcn10_update_dchubp_dpp( /* new calculated dispclk, dppclk are stored in * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz. - * dcn_validate_bandwidth compute new dispclk, dppclk. + * dcn10_validate_bandwidth compute new dispclk, dppclk. * dispclk will put in use after optimize_bandwidth when * ramp_up_dispclk_with_dpp is called. * there are two places for dppclk be put in use. One location @@ -2567,7 +2640,7 @@ static void dcn10_update_dchubp_dpp( * for example, eDP + external dp, change resolution of DP from * 1920x1080x144hz to 1280x960x60hz. * before change: dispclk = 337889 dppclk = 337889 - * change mode, dcn_validate_bandwidth calculate + * change mode, dcn10_validate_bandwidth calculate * dispclk = 143122 dppclk = 143122 * update_dchubp_dpp be executed before dispclk be updated, * dispclk = 337889, but dppclk use new value dispclk /2 = @@ -3176,13 +3249,11 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data) static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) { - struct pipe_ctx *test_pipe; + struct pipe_ctx *test_pipe, *split_pipe; const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data; - const struct rect *r1 = &scl_data->recout, *r2; - int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; + struct rect r1 = scl_data->recout, r2, r2_half; + int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b; int cur_layer = pipe_ctx->plane_state->layer_index; - bool upper_pipe_exists = false; - struct fixed31_32 one = dc_fixpt_from_int(1); /** * Disable the cursor if there's another pipe above this with a @@ -3191,26 +3262,33 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) */ for (test_pipe = pipe_ctx->top_pipe; test_pipe; test_pipe = test_pipe->top_pipe) { - if (!test_pipe->plane_state->visible) + // Skip invisible layer and pipe-split plane on same layer + if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer) continue; - r2 = &test_pipe->plane_res.scl_data.recout; - r2_r = r2->x + r2->width; - r2_b = r2->y + r2->height; + r2 = test_pipe->plane_res.scl_data.recout; + r2_r = r2.x + r2.width; + r2_b = r2.y + r2.height; + split_pipe = test_pipe; - if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) - return true; + /** + * There is another half plane on same layer because of + * pipe-split, merge together per same height. + */ + for (split_pipe = pipe_ctx->top_pipe; split_pipe; + split_pipe = split_pipe->top_pipe) + if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) { + r2_half = split_pipe->plane_res.scl_data.recout; + r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x; + r2.width = r2.width + r2_half.width; + r2_r = r2.x + r2.width; + break; + } - if (test_pipe->plane_state->layer_index < cur_layer) - upper_pipe_exists = true; + if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b) + return true; } - // if plane scaled, assume an upper plane can handle cursor if it exists. 
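
The reworked dcn10_can_pipe_disable_cursor() above merges the two half-planes that a pipe split produces on the same layer before testing whether an upper plane fully covers this one; neither half alone contains the recout, so without the merge the containment test could never pass. The rect merge assumes a horizontal split of equal heights (the helper name is ours):

/* Join two horizontally split halves of one plane into a single rect. */
static struct rect merge_split_recouts(struct rect a, struct rect b)
{
        a.x = min(a.x, b.x);
        a.width = a.width + b.width;
        return a;
}
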
- if (upper_pipe_exists && - (scl_data->ratios.horz.value != one.value || - scl_data->ratios.vert.value != one.value)) - return true; - return false; } @@ -3600,7 +3678,7 @@ void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx) void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { - struct encoder_unblank_param params = { { 0 } }; + struct encoder_unblank_param params = {0}; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dce_hwseq *hws = link->dc->hwseq; @@ -3613,7 +3691,7 @@ void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx, if (dc_is_dp_signal(pipe_ctx->stream->signal)) { if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) params.timing.pix_clk_100hz /= 2; - pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); + pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h index f0e0d07b0311..e2508d637e0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h @@ -60,14 +60,18 @@ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \ SRI(CURSOR_DST_OFFSET, CURSOR0_, id) +#define IPP_REG_LIST_DCN201(id) \ + IPP_REG_LIST_DCN(id), \ + SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \ + SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \ + SRI(CURSOR_SIZE, CURSOR0_, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id), \ + SRI(CURSOR_POSITION, CURSOR0_, id), \ + SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \ + SRI(CURSOR_DST_OFFSET, CURSOR0_, id) + #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 #define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L -#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 -#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L -#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 -#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L -#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 -#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L #define IPP_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -122,6 +126,23 @@ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh) +#define IPP_MASK_SH_LIST_DCN201(mask_sh) \ + IPP_MASK_SH_LIST_DCN(mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ + IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, 
CURSOR_DST_X_OFFSET, mask_sh) + #define IPP_DCN10_REG_FIELD_LIST(type) \ type CNVC_SURFACE_PIXEL_FORMAT; \ type CNVC_BYPASS; \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index e4701825b5a0..2dc4b4e4ba02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1460,5 +1460,14 @@ void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc, if (enc->features.flags.bits.IS_HBR3_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH3; + if (enc->features.flags.bits.IS_UHBR10_CAPABLE) + max_link_cap.link_rate = LINK_RATE_UHBR10; + + if (enc->features.flags.bits.IS_UHBR13_5_CAPABLE) + max_link_cap.link_rate = LINK_RATE_UHBR13_5; + + if (enc->features.flags.bits.IS_UHBR20_CAPABLE) + max_link_cap.link_rate = LINK_RATE_UHBR20; + *link_settings = max_link_cap; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index d8b22618b79e..c337588231ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -118,6 +118,7 @@ struct dcn10_link_enc_registers { uint32_t RDPCSTX_PHY_CNTL4; uint32_t RDPCSTX_PHY_CNTL5; uint32_t RDPCSTX_PHY_CNTL6; + uint32_t RDPCSPIPE_PHY_CNTL6; uint32_t RDPCSTX_PHY_CNTL7; uint32_t RDPCSTX_PHY_CNTL8; uint32_t RDPCSTX_PHY_CNTL9; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 37848f4577b1..3d2a2848857a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -304,7 +304,7 @@ void optc1_program_timing( if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2) h_div = H_TIMING_DIV_BY2; - if (REG(OPTC_DATA_FORMAT_CONTROL)) { + if (REG(OPTC_DATA_FORMAT_CONTROL) && optc1->tg_mask->OPTC_DATA_FORMAT != 0) { uint32_t data_fmt = 0; if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 7daadb6a5233..19a2dd619ec7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -978,10 +978,8 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) pool->base.mpc = NULL; } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) @@ -1011,14 +1009,10 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; } for (i = 0; i < pool->base.audio_count; i++) { @@ -1296,7 +1290,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link( * in daisy chain use case */ j = i; - if (pool->stream_enc[i]->id == + if (link->ep_type == 
DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id == link->link_enc->preferred_engine) return pool->stream_enc[i]; } @@ -1320,7 +1314,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, .panel_cntl_create = dcn10_panel_cntl_create, - .validate_bandwidth = dcn_validate_bandwidth, + .validate_bandwidth = dcn10_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index cf364ae93138..bf4436d7aaab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -29,6 +29,9 @@ #include "dcn10_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" +#include "dcn30/dcn30_afmt.h" #define DC_LOGGER \ enc1->base.ctx->logger @@ -644,6 +647,12 @@ void enc1_stream_encoder_set_throttled_vcp_size( x), 26)); + // If y rounds up to integer, carry it over to x. + if (y >> 26) { + x += 1; + y = 0; + } + REG_SET_2(DP_MSE_RATE_CNTL, 0, DP_MSE_RATE_X, x, DP_MSE_RATE_Y, y); @@ -726,6 +735,16 @@ void enc1_stream_encoder_update_dp_info_packets( 0, /* packetIndex */ &info_frame->vsc); + /* VSC SDP at packetIndex 1 is used by PSR in DMCUB FW. + * Note that the enablement of GSP1 is not done below, + * it's done in FW. + */ + if (info_frame->vsc.valid) + enc1_update_generic_info_packet( + enc1, + 1, /* packetIndex */ + &info_frame->vsc); + if (info_frame->spd.valid) enc1_update_generic_info_packet( enc1, @@ -883,7 +902,21 @@ void enc1_stream_encoder_stop_dp_info_packets( } +void enc1_stream_encoder_reset_fifo( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + /* set DIG_START to 0x1 to reset FIFO */ + REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + udelay(100); + + /* write 0 to take the FIFO out of reset */ + REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); +} + void enc1_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -914,6 +947,8 @@ void enc1_stream_encoder_dp_blank( /* disable DP stream */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM); + /* the encoder stops sending the video stream * at the start of the vertical blanking. 
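
enc1_stream_encoder_set_throttled_vcp_size() above programs the MST average-time-slots value as a fixed-point X.Y pair with 26 fractional bits; the added check catches the case where rounding the fraction overflows past the fractional range and carries it into X rather than writing an out-of-range Y:

/* X.Y fixed point, 26 fractional bits: normalize a carry out of Y. */
if (y >> 26) {          /* fraction rounded up to 1.0 */
        x += 1;
        y = 0;
}
REG_SET_2(DP_MSE_RATE_CNTL, 0,
          DP_MSE_RATE_X, x,
          DP_MSE_RATE_Y, y);
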
* Poll for DP_VID_STREAM_STATUS == 0 @@ -930,10 +965,13 @@ void enc1_stream_encoder_dp_blank( */ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET); } /* output video stream to link encoder */ void enc1_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) { @@ -1000,6 +1038,8 @@ void enc1_stream_encoder_dp_unblank( */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } void enc1_stream_encoder_set_avmute( @@ -1444,6 +1484,10 @@ void enc1_se_hdmi_audio_setup( void enc1_se_hdmi_audio_disable( struct stream_encoder *enc) { +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (enc->afmt && enc->afmt->funcs->afmt_powerdown) + enc->afmt->funcs->afmt_powerdown(enc->afmt); +#endif enc1_se_enable_audio_clock(enc, false); } @@ -1556,6 +1600,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 0d86df97878c..a146a41f68e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -626,10 +626,15 @@ void enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); +void enc1_stream_encoder_reset_fifo( + struct stream_encoder *enc); + void enc1_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc); void enc1_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index ede65100a050..f98aba308028 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -169,7 +169,29 @@ type DTBCLK_DTO_DIV[MAX_PIPES];\ type DCCG_AUDIO_DTO_SEL;\ type DCCG_AUDIO_DTO0_SOURCE_SEL;\ - type DENTIST_DISPCLK_CHG_MODE; + type DENTIST_DISPCLK_CHG_MODE;\ + type DSCCLK0_DTO_PHASE;\ + type DSCCLK0_DTO_MODULO;\ + type DSCCLK1_DTO_PHASE;\ + type DSCCLK1_DTO_MODULO;\ + type DSCCLK2_DTO_PHASE;\ + type DSCCLK2_DTO_MODULO;\ + type DSCCLK0_DTO_ENABLE;\ + type DSCCLK1_DTO_ENABLE;\ + type DSCCLK2_DTO_ENABLE;\ + type SYMCLK32_ROOT_SE0_GATE_DISABLE;\ + type SYMCLK32_ROOT_SE1_GATE_DISABLE;\ + type SYMCLK32_ROOT_SE2_GATE_DISABLE;\ + type SYMCLK32_ROOT_SE3_GATE_DISABLE;\ + type SYMCLK32_ROOT_LE0_GATE_DISABLE;\ + type SYMCLK32_ROOT_LE1_GATE_DISABLE;\ + type DPSTREAMCLK_ROOT_GATE_DISABLE;\ + type DPSTREAMCLK_GATE_DISABLE;\ + type HDMISTREAMCLK0_DTO_PHASE;\ + type HDMISTREAMCLK0_DTO_MODULO;\ + type HDMICHARCLK0_GATE_DISABLE;\ + type HDMICHARCLK0_ROOT_GATE_DISABLE; + struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) @@ -205,6 +227,16 @@ struct dccg_registers { uint32_t SYMCLK32_SE_CNTL; uint32_t SYMCLK32_LE_CNTL; uint32_t DENTIST_DISPCLK_CNTL; + uint32_t DSCCLK_DTO_CTRL; + uint32_t DSCCLK0_DTO_PARAM; + uint32_t DSCCLK1_DTO_PARAM; + uint32_t DSCCLK2_DTO_PARAM; + uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE; + uint32_t DPSTREAMCLK_GATE_DISABLE; + uint32_t 
DCCG_GATE_DISABLE_CNTL3; + uint32_t HDMISTREAMCLK0_DTO_PARAM; + uint32_t DCCG_GATE_DISABLE_CNTL4; + }; struct dcn_dccg { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 79b640e202eb..ef5c4c0f4d6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -162,6 +162,8 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width); REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height); REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en, + DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index a47ba1d45be9..e6af99ae3d9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -52,6 +52,9 @@ #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" #include "hw_sequencer.h" +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" +#include "inc/link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -2120,7 +2123,7 @@ void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { - struct encoder_unblank_param params = { { 0 } }; + struct encoder_unblank_param params = {0}; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct dce_hwseq *hws = link->dc->hwseq; @@ -2135,12 +2138,17 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx, params.link_settings.link_rate = link_settings->link_rate; - if (dc_is_dp_signal(pipe_ctx->stream->signal)) { + if (is_dp_128b_132b_signal(pipe_ctx)) { + /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank( + pipe_ctx->stream_res.hpo_dp_stream_enc, + pipe_ctx->stream_res.tg->inst); + } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1) params.timing.pix_clk_100hz /= 2; pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1); - pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, ¶ms); + pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); } if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { @@ -2262,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap( dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -2290,7 +2298,7 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; - struct mpcc_blnd_cfg blnd_cfg = { {0} }; + struct mpcc_blnd_cfg blnd_cfg = {0}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; int mpcc_id; struct mpcc *new_mpcc; @@ -2374,14 +2382,39 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) uint32_t active_total_with_borders; uint32_t early_control = 0; struct 
timing_generator *tg = pipe_ctx->stream_res.tg; + struct link_encoder *link_enc; + + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream); + else + link_enc = link->link_enc; + ASSERT(link_enc); /* For MST, there are multiply stream go to only one link. * connect DIG back_end to front_end while enable_stream and * disconnect them during disable_stream * BY this, it is logic clean to separate stream and link */ - link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, - pipe_ctx->stream_res.stream_enc->id, true); + if (is_dp_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) + pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( + pipe_ctx->stream->ctx->dc->hwseq, true); + setup_dp_hpo_stream(pipe_ctx, true); + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream( + pipe_ctx->stream_res.hpo_dp_stream_enc); + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link( + pipe_ctx->stream_res.hpo_dp_stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc->inst, + link->hpo_dp_link_enc->inst); + } + + if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc) + link_enc->funcs->connect_dig_be_to_fe( + link_enc, pipe_ctx->stream_res.stream_enc->id, true); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { if (link->dc->hwss.program_dmdata_engine) @@ -2390,6 +2423,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.update_info_frame(pipe_ctx); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + /* enable early control to avoid corruption on DP monitor*/ active_total_with_borders = timing->h_addressable @@ -2406,7 +2442,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) /* enable audio only within mode set */ if (pipe_ctx->stream_res.audio != NULL) { - if (dc_is_dp_signal(pipe_ctx->stream->signal)) + if (is_dp_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc); + else if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index f6e747f25ebe..8c34751b0e58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -190,6 +190,19 @@ void optc2_set_dsc_config(struct timing_generator *optc, OPTC_DSC_SLICE_WIDTH, dsc_slice_width); } +/* Get DSC-related configuration. 
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format + */ +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OPTC_DATA_FORMAT_CONTROL, + OPTC_DSC_MODE, dsc_mode); +} + + /*TEMP: Need to figure out inheritance model here.*/ bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { @@ -467,6 +480,11 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc) (h_blank_start - 200 - 1) / optc1->opp_count, MASTER_UPDATE_LOCK_DB_Y, v_blank_start - 1); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); } void optc2_lock_doublebuffer_disable(struct timing_generator *optc) @@ -574,6 +592,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc2_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = optc2_set_dwb_source, .set_odm_bypass = optc2_set_odm_bypass, .set_odm_combine = optc2_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index be19a6885fbf..f7968b9ca16e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -98,6 +98,9 @@ void optc2_set_dsc_config(struct timing_generator *optc, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode); + void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index e3e01b17c164..3883f918b3bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -35,7 +35,7 @@ #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" -#include "dml/dcn2x/dcn2x.h" +#include "dml/dcn20/dcn20_fpu.h" #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" @@ -63,6 +63,7 @@ #include "dcn20_dccg.h" #include "dcn20_vmid.h" #include "dc_link_ddc.h" +#include "dc_link_dp.h" #include "dce/dce_panel_cntl.h" #include "navi10_ip_offset.h" @@ -86,6 +87,7 @@ #include "dce/dce_aux.h" #include "dce/dce_i2c.h" #include "vm_helper.h" +#include "link_enc_cfg.h" #include "amdgpu_socbb.h" @@ -1595,15 +1597,32 @@ static void get_pixel_clock_parameters( const struct dc_stream_state *stream = pipe_ctx->stream; struct pipe_ctx *odm_pipe; int opp_cnt = 1; + struct dc_link *link = stream->link; + struct link_encoder *link_enc = NULL; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; - pixel_clk_params->encoder_object_id = stream->link->link_enc->id; + + /* Links supporting dynamically assigned link encoder will be assigned next + * available encoder if one not already assigned. 
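
optc2_get_dsc_status() above is the read side consumed by the DSC power-gating pass added to dcn10_init_pipes() earlier in this diff: a nonzero OPTC_DSC_MODE means the OPTC is feeding a DSC. Typical call shape, mirroring that pass:

/* Probe one timing generator for active DSC (sketch). */
uint32_t dsc_mode = 0;

if (tg->funcs->get_dsc_status)
        tg->funcs->get_dsc_status(tg, &dsc_mode);
if (dsc_mode != 0)
        tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
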
+ */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_stream(stream->ctx->dc, stream); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc(stream->ctx->dc); + } else + link_enc = stream->link->link_enc; + ASSERT(link_enc); + + if (link_enc) + pixel_clk_params->encoder_object_id = link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ + /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * LINK_RATE_REF_FREQ_IN_KHZ; pixel_clk_params->flags.ENABLE_SS = 0; @@ -1854,7 +1873,9 @@ static void swizzle_to_dml_params( case DC_SW_VAR_D_X: *sw_mode = dm_sw_var_d_x; break; - + case DC_SW_VAR_R_X: + *sw_mode = dm_sw_var_r_x; + break; default: ASSERT(0); /* Not supported */ break; @@ -3044,6 +3065,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; + if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + return true; } return false; } @@ -3094,6 +3117,10 @@ void dcn20_calculate_dlg_params( context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; + + if (dc->debug.min_dram_clk_khz > context->bw_ctx.bw.dcn.clk.dramclk_khz) + context->bw_ctx.bw.dcn.clk.dramclk_khz = dc->debug.min_dram_clk_khz; + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; context->bw_ctx.bw.dcn.clk.p_state_change_support = @@ -3140,6 +3167,9 @@ void dcn20_calculate_dlg_params( if (!context->res_ctx.pipe_ctx[i].stream) continue; + if (dc->ctx->dce_version == DCN_VERSION_2_01) + cstate_en = false; + context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].dlg_regs, &context->res_ctx.pipe_ctx[i].ttu_regs, @@ -3152,7 +3182,7 @@ void dcn20_calculate_dlg_params( context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, &context->res_ctx.pipe_ctx[i].rq_regs, - pipes[pipe_idx].pipe); + &pipes[pipe_idx].pipe); pipe_idx++; } } @@ -3630,9 +3660,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev) return DML_PROJECT_NAVI10v2; } -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) - static bool init_soc_bounding_box(struct dc *dc, struct dcn20_resource_pool *pool) { @@ -3668,16 +3695,22 @@ static bool init_soc_bounding_box(struct dc *dc, clock_limits_available = (status == PP_SMU_RESULT_OK); } - if (clock_limits_available && uclk_states_available && num_states) + if (clock_limits_available && uclk_states_available && num_states) { + DC_FP_START(); dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); - else if (clock_limits_available) + DC_FP_END(); + } else if (clock_limits_available) { + DC_FP_START(); dcn20_cap_soc_clocks(loaded_bb, max_clocks); + DC_FP_END(); + } } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); - + 
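
init_soc_bounding_box() and dcn20_resource_construct() previously held the FPU section open across entire construction paths; the hunks here narrow DC_FP_START()/DC_FP_END() to just the bounding-box math that actually uses floating point. On x86 these brackets map to kernel_fpu_begin()/kernel_fpu_end(), which disable preemption, so they should enclose as little as possible:

/* Keep kernel FPU sections minimal: bracket only the FP-using call. */
DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
DC_FP_END();
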
DC_FP_END(); return true; } @@ -3697,8 +3730,6 @@ static bool dcn20_resource_construct( enum dml_project dml_project_version = get_dml_project_version(ctx->asic_id.hw_internal_rev); - DC_FP_START(); - ctx->dc_bios->regs = &bios_regs; pool->base.funcs = &dcn20_res_pool_funcs; @@ -4047,12 +4078,10 @@ static bool dcn20_resource_construct( pool->base.oem_device = NULL; } - DC_FP_END(); return true; create_fail: - DC_FP_END(); dcn20_resource_destruct(pool); return false; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index e6307397e0d2..8a70f92795c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -29,6 +29,8 @@ #include "dcn20_stream_encoder.h" #include "reg_helper.h" #include "hw_shared.h" +#include "inc/link_dpcd.h" +#include "dpcd_defs.h" #define DC_LOGGER \ enc1->base.ctx->logger @@ -209,7 +211,8 @@ static void enc2_stream_encoder_stop_hdmi_info_packets( /* Update GSP7 SDP 128 byte long */ static void enc2_update_gsp7_128_info_packet( struct dcn10_stream_encoder *enc1, - const struct dc_info_packet_128 *info_packet) + const struct dc_info_packet_128 *info_packet, + bool immediate_update) { uint32_t i; @@ -264,7 +267,9 @@ static void enc2_update_gsp7_128_info_packet( REG_WRITE(AFMT_GENERIC_7, *content++); } - REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, 1); + REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC7_FRAME_UPDATE, !immediate_update, + AFMT_GENERIC7_IMMEDIATE_UPDATE, immediate_update); } /* Set DSC-related configuration. @@ -290,7 +295,8 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc, static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps) + uint8_t *dsc_packed_pps, + bool immediate_update) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -306,7 +312,7 @@ static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, pps_sdp.hb2 = 127; pps_sdp.hb3 = 0; memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb)); - enc2_update_gsp7_128_info_packet(enc1, &pps_sdp); + enc2_update_gsp7_128_info_packet(enc1, &pps_sdp, immediate_update); /* Enable Generic Stream Packet 7 (GSP) transmission */ //REG_UPDATE(DP_SEC_CNTL, @@ -444,6 +450,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing) } void enc2_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) { @@ -522,6 +529,8 @@ void enc2_stream_encoder_dp_unblank( */ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); + + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM); } static void enc2_dp_set_odm_combine( @@ -584,6 +593,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index f3d1a0237bda..baa1e539f341 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -104,6 +104,7 @@ void enc2_stream_encoder_dp_set_stream_attribute( uint32_t enable_sdp_splitting); 
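
The header change that follows completes the series-wide signature update: dp_blank and dp_unblank now take the owning struct dc_link so encoder implementations can emit dp_source_sequence_trace() checkpoints against the correct link. Any out-of-tree implementation of stream_encoder_funcs has to follow suit; the updated callback shapes look like:

/* Updated stream_encoder_funcs callback signatures (sketch). */
void (*dp_blank)(struct dc_link *link, struct stream_encoder *enc);
void (*dp_unblank)(struct dc_link *link,
                   struct stream_encoder *enc,
                   const struct encoder_unblank_param *param);
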
void enc2_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile new file mode 100644 index 000000000000..f68038ceb1b1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: MIT +# +# Makefile for DCN. +DCN201 = dcn201_init.o dcn201_resource.o dcn201_hwseq.o \ + dcn201_hubbub.o\ + dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \ + dcn201_dccg.o dcn201_link_encoder.o + +ifdef CONFIG_X86 +CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -maltivec +endif + +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -mhard-float +endif + +ifdef CONFIG_X86 +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -mpreferred-stack-boundary=4 +else +CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -msse2 +endif +endif +AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DCN201) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c new file mode 100644 index 000000000000..f5bf04f7da25 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c @@ -0,0 +1,84 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
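/*
 * Illustrative note: the Makefile above gives dcn201_resource.o
 * hard-float/SSE flags because the resource code, like dcn20's, runs
 * DML bandwidth math on the FPU. Any such float region must be
 * bracketed so the kernel saves and restores FPU state; a minimal
 * sketch of the pattern visible in dcn20_resource.c above:
 *
 *   DC_FP_START();
 *   dcn20_patch_bounding_box(dc, loaded_bb);   // double-precision math
 *   DC_FP_END();
 */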
+ * + * Authors: AMD + * + */ + +#include "dcn201_dccg.h" + +#include "reg_helper.h" +#include "core_types.h" + +#define TO_DCN_DCCG(dccg)\ + container_of(dccg, struct dcn_dccg, base) + +#define REG(reg) \ + (dccg_dcn->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name + +#define CTX \ + dccg_dcn->base.ctx + +#define DC_LOGGER \ + dccg->ctx->logger + +void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +{ + /* vbios handles it */ +} + +static const struct dccg_funcs dccg201_funcs = { + .update_dpp_dto = dccg201_update_dpp_dto, + .get_dccg_ref_freq = dccg2_get_dccg_ref_freq, + .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en, + .otg_add_pixel = dccg2_otg_add_pixel, + .otg_drop_pixel = dccg2_otg_drop_pixel, + .dccg_init = dccg2_init +}; + +struct dccg *dccg201_create( + struct dc_context *ctx, + const struct dccg_registers *regs, + const struct dccg_shift *dccg_shift, + const struct dccg_mask *dccg_mask) +{ + struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL); + struct dccg *base; + + if (dccg_dcn == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + base = &dccg_dcn->base; + base->ctx = ctx; + base->funcs = &dccg201_funcs; + + dccg_dcn->regs = regs; + dccg_dcn->dccg_shift = dccg_shift; + dccg_dcn->dccg_mask = dccg_mask; + + return &dccg_dcn->base; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h new file mode 100644 index 000000000000..80888b0484fb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h @@ -0,0 +1,37 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN201_DCCG_H__ +#define __DCN201_DCCG_H__ + +#include "dcn20/dcn20_dccg.h" + +struct dccg *dccg201_create( + struct dc_context *ctx, + const struct dccg_registers *regs, + const struct dccg_shift *dccg_shift, + const struct dccg_mask *dccg_mask); + +#endif //__DCN201_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c new file mode 100644 index 000000000000..8b6505b7dca8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c @@ -0,0 +1,316 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn201_dpp.h" +#include "basics/conversion.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +static void dpp201_cnv_setup( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base); + uint32_t pixel_format = 0; + uint32_t alpha_en = 1; + enum dc_color_space color_space = COLOR_SPACE_SRGB; + enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; + bool force_disable_cursor = false; + uint32_t is_2bit = 0; + + REG_SET_2(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode); + + REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); + REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + pixel_format = 10; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + pixel_format = 22; + break; + case 
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + pixel_format = 12; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + pixel_format = 112; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + pixel_format = 113; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + pixel_format = 114; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: + pixel_format = 115; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + pixel_format = 118; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + pixel_format = 119; + alpha_en = 0; + break; + default: + break; + } + + if (is_2bit == 1 && alpha_2bit_lut != NULL) { + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); + } + + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + dpp1_program_input_csc(dpp_base, color_space, select, NULL); + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + } + dpp2_power_on_obuf(dpp_base, true); +} + +#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) + +static bool dpp201_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps) +{ + uint32_t pixel_width; + + if (scl_data->viewport.width > scl_data->recout.width) + pixel_width = scl_data->recout.width; + else + pixel_width = scl_data->viewport.width; + + if (scl_data->viewport.width != scl_data->h_active && + scl_data->viewport.height != scl_data->v_active && + dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->format == PIXEL_FORMAT_FP16) + return false; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + + if (scl_data->ratios.horz.value == (8ll << 32)) + scl_data->ratios.horz.value--; + if (scl_data->ratios.vert.value == (8ll << 32)) + scl_data->ratios.vert.value--; + if (scl_data->ratios.horz_c.value == (8ll << 32)) + scl_data->ratios.horz_c.value--; + if (scl_data->ratios.vert_c.value == (8ll << 32)) + scl_data->ratios.vert_c.value--; + + if (in_taps->h_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) + scl_data->taps.h_taps = 8; + else + scl_data->taps.h_taps = 4; + } else + scl_data->taps.h_taps = in_taps->h_taps; + + if (in_taps->v_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) + scl_data->taps.v_taps = 8; + else + scl_data->taps.v_taps = 4; + } else + scl_data->taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) + scl_data->taps.v_taps_c = 4; + else + scl_data->taps.v_taps_c = 2; + } else + scl_data->taps.v_taps_c = in_taps->v_taps_c; + if 
(in_taps->h_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) + scl_data->taps.h_taps_c = 4; + else + scl_data->taps.h_taps_c = 2; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; + else + scl_data->taps.h_taps_c = in_taps->h_taps_c; + + if (!dpp->ctx->dc->debug.always_scale) { + if (IDENTITY_RATIO(scl_data->ratios.horz)) + scl_data->taps.h_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert)) + scl_data->taps.v_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; + } + + return true; +} + +static struct dpp_funcs dcn201_dpp_funcs = { + .dpp_read_state = dpp20_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, + .dpp_set_degamma = dpp2_set_degamma, + .dpp_program_input_lut = dpp2_dummy_program_input_lut, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp201_cnv_setup, + .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, + .dpp_program_blnd_lut = dpp20_program_blnd_lut, + .dpp_program_shaper_lut = dpp20_program_shaper, + .dpp_program_3dlut = dpp20_program_3dlut, + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp2_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, +}; + +static struct dpp_caps dcn201_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, +}; + +bool dpp201_construct( + struct dcn201_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_dpp_registers *tf_regs, + const struct dcn201_dpp_shift *tf_shift, + const struct dcn201_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn201_dpp_funcs; + dpp->base.caps = &dcn201_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + dpp->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP; + + dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; + dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h new file mode 100644 index 000000000000..cbd5b47b4acf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h @@ -0,0 +1,83 @@ +/* Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN201_DPP_H__ +#define __DCN201_DPP_H__ + +#include "dcn20/dcn20_dpp.h" + +#define TO_DCN201_DPP(dpp)\ + container_of(dpp, struct dcn201_dpp, base) + +#define TF_REG_LIST_DCN201(id) \ + TF_REG_LIST_DCN20(id) + +#define TF_REG_LIST_SH_MASK_DCN201(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN20(mask_sh) + +#define TF_REG_FIELD_LIST_DCN201(type) \ + TF_REG_FIELD_LIST_DCN2_0(type) + +struct dcn201_dpp_shift { + TF_REG_FIELD_LIST_DCN201(uint8_t); +}; + +struct dcn201_dpp_mask { + TF_REG_FIELD_LIST_DCN201(uint32_t); +}; + +#define DPP_DCN201_REG_VARIABLE_LIST \ + DPP_DCN2_REG_VARIABLE_LIST + +struct dcn201_dpp_registers { + DPP_DCN201_REG_VARIABLE_LIST; +}; + +struct dcn201_dpp { + struct dpp base; + + const struct dcn201_dpp_registers *tf_regs; + const struct dcn201_dpp_shift *tf_shift; + const struct dcn201_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +bool dpp201_construct(struct dcn201_dpp *dpp2, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_dpp_registers *tf_regs, + const struct dcn201_dpp_shift *tf_shift, + const struct dcn201_dpp_mask *tf_mask); + +#endif /* __DC_HWSS_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c new file mode 100644 index 000000000000..037d265431c6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c @@ -0,0 +1,107 @@ +/* +* Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
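/*
 * Illustrative note on dcn201_dpp.h above: it only aliases the DCN20
 * register lists, so the shift table is generated with uint8_t fields
 * and the mask table with uint32_t fields. The register helpers consume
 * them through the FN() macro; conceptually (simplified sketch):
 *
 *   #define FN(reg_name, field_name) \
 *           dpp->tf_shift->field_name, dpp->tf_mask->field_name
 *
 *   // REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0) then performs a
 *   // read-modify-write of FORMAT_CONTROL using FORMAT_CNV16's shift
 *   // and mask from those tables.
 */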
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "dcn20/dcn20_hubbub.h" +#include "dcn201_hubbub.h" +#include "reg_helper.h" + +#define REG(reg)\ + hubbub1->regs->reg + +#define DC_LOGGER \ + hubbub1->base.ctx->logger + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +#define REG(reg)\ + hubbub1->regs->reg + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +static bool hubbub201_program_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + bool wm_pending = false; + + if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68); + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + + return wm_pending; +} + +static const struct hubbub_funcs hubbub201_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = NULL, + .init_vm_ctx = NULL, + .dcc_support_swizzle = hubbub2_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, + .wm_read_state = hubbub2_wm_read_state, + .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, + .program_watermarks = hubbub201_program_watermarks, + .hubbub_read_state = hubbub2_read_state, +}; + +void hubbub201_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask) +{ + hubbub->base.ctx = ctx; + + hubbub->base.funcs = &hubbub201_funcs; + + hubbub->regs = hubbub_regs; + hubbub->shifts = hubbub_shift; + hubbub->masks = hubbub_mask; + + hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h new file mode 100644 index 000000000000..5aeca0be3e15 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h @@ -0,0 +1,45 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef DAL_DC_DCN201_DCN201_HUBBUB_H_ +#define DAL_DC_DCN201_DCN201_HUBBUB_H_ + +#include "dcn20/dcn20_hubbub.h" + +#define HUBBUB_REG_LIST_DCN201(id)\ + HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_VM_REG_LIST(), \ + SR(DCHUBBUB_CRC_CTRL) + +#define HUBBUB_MASK_SH_LIST_DCN201(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh) + +void hubbub201_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask); + +#endif /* DAL_DC_DCN201_DCN201_HUBBUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c new file mode 100644 index 000000000000..6b6f74d4afd1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -0,0 +1,150 @@ +/* + * Copyright 2012-17 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
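/*
 * Hedged sketch of the caller contract for
 * hubbub201_program_watermarks() in dcn201_hubbub.c above (names
 * assumed from the common DC flow, not spelled out in this patch): the
 * function returns true while a watermark change is still pending, and
 * safe_to_lower tells the DCN10 helpers whether values may decrease:
 *
 *   wm_pending = hubbub->funcs->program_watermarks(hubbub, watermarks,
 *                                                  refclk_mhz, false);
 *   // ...apply the new clocks...
 *   if (wm_pending)
 *           hubbub->funcs->program_watermarks(hubbub, watermarks,
 *                                             refclk_mhz, true);
 */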
+ * + * Authors: AMD + * + */ +#include "dcn201_hubp.h" + +#include "dm_services.h" +#include "dce_calcs.h" +#include "reg_helper.h" +#include "basics/conversion.h" + +#define REG(reg)\ + hubp201->hubp_regs->reg + +#define CTX \ + hubp201->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubp201->hubp_shift->field_name, hubp201->hubp_mask->field_name + +static void hubp201_program_surface_config( + struct hubp *hubp, + enum surface_pixel_format format, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + unsigned int compat_level) +{ + hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks); + hubp1_program_tiling(hubp, tiling_info, format); + hubp1_program_size(hubp, format, plane_size, dcc); + hubp1_program_pixel_format(hubp, format); +} + +void hubp201_program_deadline( + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *ttu_attr) +{ + hubp1_program_deadline(hubp, dlg_attr, ttu_attr); +} + +void hubp201_program_requestor( + struct hubp *hubp, + struct _vcs_dpi_display_rq_regs_st *rq_regs) +{ + struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp); + + REG_UPDATE(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address); + + REG_SET_4(DCN_EXPANSION_MODE, 0, + DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode, + PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode, + MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode, + CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode); + + REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0, + CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size, + SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height); + + REG_SET_5(DCHUBP_REQ_SIZE_CONFIG_C, 0, + CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size, + SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height); +} + +static void hubp201_setup( + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_attr, + struct _vcs_dpi_display_ttu_regs_st *ttu_attr, + struct _vcs_dpi_display_rq_regs_st *rq_regs, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) +{ + hubp2_vready_at_or_After_vsync(hubp, pipe_dest); + hubp201_program_requestor(hubp, rq_regs); + hubp201_program_deadline(hubp, dlg_attr, ttu_attr); +} + +static struct hubp_funcs dcn201_hubp_funcs = { + .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, + .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, + .hubp_program_surface_flip_and_addr = hubp1_program_surface_flip_and_addr, + .hubp_program_surface_config = hubp201_program_surface_config, + .hubp_is_flip_pending = hubp1_is_flip_pending, + .hubp_setup = hubp201_setup, + .hubp_setup_interdependent = hubp2_setup_interdependent, + .set_cursor_attributes = hubp2_cursor_set_attributes, + .set_cursor_position = hubp1_cursor_set_position, + .set_blank = hubp1_set_blank, + .dcc_control = hubp1_dcc_control, + .mem_program_viewport = min_set_viewport, + .hubp_clk_cntl = hubp1_clk_cntl, + .hubp_vtg_sel = hubp1_vtg_sel, + .dmdata_set_attributes = hubp2_dmdata_set_attributes, + .dmdata_load = hubp2_dmdata_load, + .dmdata_status_done = hubp2_dmdata_status_done, + .hubp_read_state = hubp2_read_state, + 
.hubp_clear_underflow = hubp1_clear_underflow, + .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, + .hubp_init = hubp1_init, +}; + +bool dcn201_hubp_construct( + struct dcn201_hubp *hubp201, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_hubp_registers *hubp_regs, + const struct dcn201_hubp_shift *hubp_shift, + const struct dcn201_hubp_mask *hubp_mask) +{ + hubp201->base.funcs = &dcn201_hubp_funcs; + hubp201->base.ctx = ctx; + hubp201->hubp_regs = hubp_regs; + hubp201->hubp_shift = hubp_shift; + hubp201->hubp_mask = hubp_mask; + hubp201->base.inst = inst; + hubp201->base.opp_id = OPP_ID_INVALID; + hubp201->base.mpcc_id = 0xf; + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h new file mode 100644 index 000000000000..a1e3384eed63 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h @@ -0,0 +1,132 @@ +/* + * Copyright 2012-17 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
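/*
 * Illustrative note: dcn201_hubp_construct() above only wires the
 * register/shift/mask tables into a caller-allocated object. A
 * resource-pool creator would follow the same pattern as
 * dccg201_create() earlier in this diff; a sketch with assumed table
 * names (hubp_regs[], hubp_shift, hubp_mask):
 *
 *   struct dcn201_hubp *hubp201 =
 *           kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL);
 *
 *   if (!hubp201)
 *           return NULL;
 *   if (dcn201_hubp_construct(hubp201, ctx, inst, &hubp_regs[inst],
 *                             &hubp_shift, &hubp_mask))
 *           return &hubp201->base;
 *   kfree(hubp201);
 *   return NULL;
 */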
+ * + * Authors: AMD + * + */ + +#ifndef __DC_MEM_INPUT_DCN201_H__ +#define __DC_MEM_INPUT_DCN201_H__ + +#include "../dcn10/dcn10_hubp.h" +#include "../dcn20/dcn20_hubp.h" + +#define TO_DCN201_HUBP(hubp)\ + container_of(hubp, struct dcn201_hubp, base) + +#define HUBP_REG_LIST_DCN201(id)\ + HUBP_REG_LIST_DCN(id),\ + SRI(PREFETCH_SETTINGS, HUBPREQ, id),\ + SRI(PREFETCH_SETTINGS_C, HUBPREQ, id),\ + SRI(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \ + SRI(CURSOR_SETTINGS, HUBPREQ, id), \ + SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \ + SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \ + SRI(CURSOR_SIZE, CURSOR0_, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id), \ + SRI(CURSOR_POSITION, CURSOR0_, id), \ + SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \ + SRI(CURSOR_DST_OFFSET, CURSOR0_, id), \ + SRI(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \ + SRI(DMDATA_ADDRESS_LOW, CURSOR0_, id), \ + SRI(DMDATA_CNTL, CURSOR0_, id), \ + SRI(DMDATA_SW_CNTL, CURSOR0_, id), \ + SRI(DMDATA_QOS_CNTL, CURSOR0_, id), \ + SRI(DMDATA_SW_DATA, CURSOR0_, id), \ + SRI(DMDATA_STATUS, CURSOR0_, id),\ + SRI(FLIP_PARAMETERS_0, HUBPREQ, id),\ + SRI(FLIP_PARAMETERS_2, HUBPREQ, id) + +#define HUBP_MASK_SH_LIST_DCN201(mask_sh)\ + HUBP_MASK_SH_LIST_DCN(mask_sh),\ + HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\ + HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\ + HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\ + HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \ + HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ + HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \ + HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh),\ + HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\ + HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\ + HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_CNTL, 
HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh) + +#define DCN201_HUBP_REG_VARIABLE_LIST \ + DCN2_HUBP_REG_COMMON_VARIABLE_LIST + +#define DCN201_HUBP_REG_FIELD_VARIABLE_LIST(type) \ + DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) + +struct dcn201_hubp_registers { + DCN201_HUBP_REG_VARIABLE_LIST; +}; + +struct dcn201_hubp_shift { + DCN201_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); +}; + +struct dcn201_hubp_mask { + DCN201_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); +}; + +struct dcn201_hubp { + struct hubp base; + struct dcn_hubp_state state; + const struct dcn201_hubp_registers *hubp_regs; + const struct dcn201_hubp_shift *hubp_shift; + const struct dcn201_hubp_mask *hubp_mask; +}; + +bool dcn201_hubp_construct( + struct dcn201_hubp *hubp201, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_hubp_registers *hubp_regs, + const struct dcn201_hubp_shift *hubp_shift, + const struct dcn201_hubp_mask *hubp_mask); + +#endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c new file mode 100644 index 000000000000..cfd09b3f705e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -0,0 +1,630 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
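/*
 * Note on dcn201_update_plane_addr() below: 'uma' is initialized from
 * plane_state->address before the plane_state NULL check runs, so the
 * check cannot catch a NULL plane. A safer ordering would be (sketch,
 * not what this patch does):
 *
 *   struct dc_plane_address uma;
 *
 *   if (plane_state == NULL)
 *           return;
 *   uma = plane_state->address;
 */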
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "basics/dc_common.h" +#include "core_types.h" +#include "resource.h" +#include "dcn201_hwseq.h" +#include "dcn201_optc.h" +#include "dce/dce_hwseq.h" +#include "hubp.h" +#include "dchubbub.h" +#include "timing_generator.h" +#include "opp.h" +#include "ipp.h" +#include "mpc.h" +#include "dccg.h" +#include "clk_mgr.h" +#include "reg_helper.h" + +#define CTX \ + hws->ctx + +#define REG(reg)\ + hws->regs->reg + +#define DC_LOGGER \ + dc->ctx->logger + +#undef FN +#define FN(reg_name, field_name) \ + hws->shifts->field_name, hws->masks->field_name + +static bool patch_address_for_sbs_tb_stereo( + struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr) +{ + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + bool sec_split = pipe_ctx->top_pipe && + pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; + + if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO && + (pipe_ctx->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE || + pipe_ctx->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM)) { + *addr = plane_state->address.grph_stereo.left_addr; + plane_state->address.grph_stereo.left_addr = + plane_state->address.grph_stereo.right_addr; + return true; + } else { + if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE && + plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) { + plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO; + plane_state->address.grph_stereo.right_addr = + plane_state->address.grph_stereo.left_addr; + plane_state->address.grph_stereo.right_meta_addr = + plane_state->address.grph_stereo.left_meta_addr; + } + } + return false; +} + +static void gpu_addr_to_uma(struct dce_hwseq *hwseq, + PHYSICAL_ADDRESS_LOC *addr) +{ + bool is_in_uma; + + if (hwseq->fb_base.quad_part <= addr->quad_part && + addr->quad_part < hwseq->fb_top.quad_part) { + addr->quad_part -= hwseq->fb_base.quad_part; + addr->quad_part += hwseq->fb_offset.quad_part; + is_in_uma = true; + } else if (hwseq->fb_offset.quad_part <= addr->quad_part && + addr->quad_part <= hwseq->uma_top.quad_part) { + is_in_uma = true; + } else if (addr->quad_part == 0) { + is_in_uma = false; + } else { + is_in_uma = false; + } +} + +static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq, + struct dc_plane_address *addr) +{ + switch (addr->type) { + case PLN_ADDR_TYPE_GRAPHICS: + gpu_addr_to_uma(hwseq, &addr->grph.addr); + gpu_addr_to_uma(hwseq, &addr->grph.meta_addr); + break; + case PLN_ADDR_TYPE_GRPH_STEREO: + gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_addr); + gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_meta_addr); + gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_addr); + gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_meta_addr); + break; + case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: + gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_addr); + gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_meta_addr); + gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_addr); + gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_meta_addr); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } +} + +void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + bool addr_patched = false; + PHYSICAL_ADDRESS_LOC addr; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dce_hwseq *hws = dc->hwseq; + struct dc_plane_address uma = plane_state->address; + + if (plane_state == NULL) + return; + + addr_patched = 
patch_address_for_sbs_tb_stereo(pipe_ctx, &addr); + + plane_address_in_gpu_space_to_uma(hws, &uma); + + pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr( + pipe_ctx->plane_res.hubp, + &uma, + plane_state->flip_immediate); + + plane_state->status.requested_address = plane_state->address; + + if (plane_state->flip_immediate) + plane_state->status.current_address = plane_state->address; + + if (addr_patched) + pipe_ctx->plane_state->address.grph_stereo.left_addr = addr; +} + +/* Blank pixel data during initialization */ +void dcn201_init_blank( + struct dc *dc, + struct timing_generator *tg) +{ + struct dce_hwseq *hws = dc->hwseq; + enum dc_color_space color_space; + struct tg_color black_color = {0}; + struct output_pixel_processor *opp = NULL; + uint32_t num_opps, opp_id_src0, opp_id_src1; + uint32_t otg_active_width, otg_active_height; + + /* program opp dpg blank color */ + color_space = COLOR_SPACE_SRGB; + color_space_to_black_color(dc, color_space, &black_color); + + /* get the OTG active size */ + tg->funcs->get_otg_active_size(tg, + &otg_active_width, + &otg_active_height); + + /* get the OPTC source */ + tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); + ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp); + opp = dc->res_pool->opps[opp_id_src0]; + + opp->funcs->opp_set_disp_pattern_generator( + opp, + CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, + COLOR_DEPTH_UNDEFINED, + &black_color, + otg_active_width, + otg_active_height, + 0); + + hws->funcs.wait_for_blank_complete(opp); +} + +static void read_mmhub_vm_setup(struct dce_hwseq *hws) +{ + uint32_t fb_base = REG_READ(MC_VM_FB_LOCATION_BASE); + uint32_t fb_top = REG_READ(MC_VM_FB_LOCATION_TOP); + uint32_t fb_offset = REG_READ(MC_VM_FB_OFFSET); + + /* MC_VM_FB_LOCATION_TOP is in pages, actual top should add 1 */ + fb_top++; + + /* bit 23:0 in register map to bit 47:24 in address */ + hws->fb_base.low_part = fb_base; + hws->fb_base.quad_part <<= 24; + + hws->fb_top.low_part = fb_top; + hws->fb_top.quad_part <<= 24; + hws->fb_offset.low_part = fb_offset; + hws->fb_offset.quad_part <<= 24; + + hws->uma_top.quad_part = hws->fb_top.quad_part + - hws->fb_base.quad_part + hws->fb_offset.quad_part; +} + +void dcn201_init_hw(struct dc *dc) +{ + int i, j; + struct dce_hwseq *hws = dc->hwseq; + struct resource_pool *res_pool = dc->res_pool; + struct dc_state *context = dc->current_state; + + if (res_pool->dccg->funcs->dccg_init) + res_pool->dccg->funcs->dccg_init(res_pool->dccg); + + if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) + dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); + REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); + + hws->funcs.dccg_init(hws); + + REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); + REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); + REG_WRITE(REFCLK_CNTL, 0); + } else { + hws->funcs.bios_golden_init(dc); + + if (dc->ctx->dc_bios->fw_info_valid) { + res_pool->ref_clocks.xtalin_clock_inKhz = + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency; + + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + if (res_pool->dccg && res_pool->hubbub) { + (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg, + dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency, + &res_pool->ref_clocks.dccg_ref_clock_inKhz); + + (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub, + 
res_pool->ref_clocks.dccg_ref_clock_inKhz, + &res_pool->ref_clocks.dchub_ref_clock_inKhz); + } else { + res_pool->ref_clocks.dccg_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + res_pool->ref_clocks.dchub_ref_clock_inKhz = + res_pool->ref_clocks.xtalin_clock_inKhz; + } + } + } else + ASSERT_CRITICAL(false); + for (i = 0; i < dc->link_count; i++) { + /* Power up AND update implementation according to the + * required signal (which may be different from the + * default signal on connector). + */ + struct dc_link *link = dc->links[i]; + + link->link_enc->funcs->hw_init(link->link_enc); + } + if (hws->fb_offset.quad_part == 0) + read_mmhub_vm_setup(hws); + } + + /* Blank pixel data with OPP DPG */ + for (i = 0; i < res_pool->timing_generator_count; i++) { + struct timing_generator *tg = res_pool->timing_generators[i]; + + if (tg->funcs->is_tg_enabled(tg)) { + dcn201_init_blank(dc, tg); + } + } + + for (i = 0; i < res_pool->timing_generator_count; i++) { + struct timing_generator *tg = res_pool->timing_generators[i]; + + if (tg->funcs->is_tg_enabled(tg)) + tg->funcs->lock(tg); + } + + for (i = 0; i < res_pool->pipe_count; i++) { + struct dpp *dpp = res_pool->dpps[i]; + + dpp->funcs->dpp_reset(dpp); + } + + /* Reset all MPCC muxes */ + res_pool->mpc->funcs->mpc_init(res_pool->mpc); + + /* initialize OPP mpc_tree parameter */ + for (i = 0; i < res_pool->res_cap->num_opp; i++) { + res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst; + res_pool->opps[i]->mpc_tree_params.opp_list = NULL; + for (j = 0; j < MAX_PIPES; j++) + res_pool->opps[i]->mpcc_disconnect_pending[j] = false; + } + + for (i = 0; i < res_pool->timing_generator_count; i++) { + struct timing_generator *tg = res_pool->timing_generators[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct hubp *hubp = res_pool->hubps[i]; + struct dpp *dpp = res_pool->dpps[i]; + + pipe_ctx->stream_res.tg = tg; + pipe_ctx->pipe_idx = i; + + pipe_ctx->plane_res.hubp = hubp; + pipe_ctx->plane_res.dpp = dpp; + pipe_ctx->plane_res.mpcc_inst = dpp->inst; + hubp->mpcc_id = dpp->inst; + hubp->opp_id = OPP_ID_INVALID; + hubp->power_gated = false; + pipe_ctx->stream_res.opp = NULL; + + hubp->funcs->hubp_init(hubp); + + res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; + pipe_ctx->stream_res.opp = res_pool->opps[i]; + /*To do: number of MPCC != number of opp*/ + hws->funcs.plane_atomic_disconnect(dc, pipe_ctx); + } + + /* initialize DWB pointer to MCIF_WB */ + for (i = 0; i < res_pool->res_cap->num_dwb; i++) + res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i]; + + for (i = 0; i < res_pool->timing_generator_count; i++) { + struct timing_generator *tg = res_pool->timing_generators[i]; + + if (tg->funcs->is_tg_enabled(tg)) + tg->funcs->unlock(tg); + } + + for (i = 0; i < res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + dc->hwss.disable_plane(dc, pipe_ctx); + + pipe_ctx->stream_res.tg = NULL; + pipe_ctx->plane_res.hubp = NULL; + } + + for (i = 0; i < res_pool->timing_generator_count; i++) { + struct timing_generator *tg = res_pool->timing_generators[i]; + + tg->funcs->tg_init(tg); + } + + /* end of FPGA. 
Below if real ASIC */ + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) + return; + + for (i = 0; i < res_pool->audio_count; i++) { + struct audio *audio = res_pool->audios[i]; + + audio->funcs->hw_init(audio); + } + + /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ + REG_WRITE(DIO_MEM_PWR_CTRL, 0); + + if (!dc->debug.disable_clock_gate) { + /* enable all DCN clock gating */ + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); + + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); + } +} + +/* trigger HW to start disconnect plane from stream on the next vsync */ +void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + struct dce_hwseq *hws = dc->hwseq; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + int dpp_id = pipe_ctx->plane_res.dpp->inst; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc *mpcc_to_remove = NULL; + struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; + bool mpcc_removed = false; + + mpc_tree_params = &(opp->mpc_tree_params); + + /* check if this plane is being used by an MPCC in the secondary blending chain */ + if (mpc->funcs->get_mpcc_for_dpp_from_secondary) + mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id); + + /* remove MPCC from secondary if being used */ + if (mpcc_to_remove != NULL && mpc->funcs->remove_mpcc_from_secondary) { + mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, mpcc_to_remove); + mpcc_removed = true; + } + + /* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */ + mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id); + if (mpcc_to_remove != NULL) { + mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); + mpcc_removed = true; + } + + /*Already reset*/ + if (mpcc_removed == false) + return; + + if (opp != NULL) + opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; + + dc->optimized_required = true; + + if (hubp->funcs->hubp_disconnect) + hubp->funcs->hubp_disconnect(hubp); + + if (dc->debug.sanity_checks) + hws->funcs.verify_allow_pstate_change_high(dc); +} + +void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct mpcc_blnd_cfg blnd_cfg; + bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; + int mpcc_id, dpp_id; + struct mpcc *new_mpcc; + struct mpcc *remove_mpcc = NULL; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { + get_hdr_visual_confirm_color( + pipe_ctx, &blnd_cfg.black_color); + } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { + get_surface_visual_confirm_color( + pipe_ctx, &blnd_cfg.black_color); + } else { + color_space_to_black_color( + dc, pipe_ctx->stream->output_color_space, + &blnd_cfg.black_color); + } + + if (per_pixel_alpha) + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + else + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + + blnd_cfg.overlap_only = false; + + if (pipe_ctx->plane_state->global_alpha_value) + blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; + else + blnd_cfg.global_alpha = 0xff; + + blnd_cfg.global_gain = 0xff; + blnd_cfg.background_color_bpc = 4; + blnd_cfg.bottom_gain_mode = 0; + blnd_cfg.top_gain = 0x1f000; + blnd_cfg.bottom_inside_gain = 0x1f000; + 
blnd_cfg.bottom_outside_gain = 0x1f000; + /*the input to MPCC is RGB*/ + blnd_cfg.black_color.color_b_cb = 0; + blnd_cfg.black_color.color_g_y = 0; + blnd_cfg.black_color.color_r_cr = 0; + + /* DCN1.0 has output CM before MPC which seems to screw with + * pre-multiplied alpha. This is a w/a hopefully unnecessary for DCN2. + */ + blnd_cfg.pre_multiplied_alpha = per_pixel_alpha; + + /* + * TODO: remove hack + * Note: currently there is a bug in init_hw such that + * on resume from hibernate, BIOS sets up MPCC0, and + * we do mpcc_remove but the mpcc cannot go to idle + * after remove. This cause us to pick mpcc1 here, + * which causes a pstate hang for yet unknown reason. + */ + dpp_id = hubp->inst; + mpcc_id = dpp_id; + + /* If there is no full update, don't need to touch MPC tree*/ + if (!pipe_ctx->plane_state->update_flags.bits.full_update) { + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); + return; + } + + /* check if this plane is being used by an MPCC in the secondary blending chain */ + if (mpc->funcs->get_mpcc_for_dpp_from_secondary) + remove_mpcc = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id); + + /* remove MPCC from secondary if being used */ + if (remove_mpcc != NULL && mpc->funcs->remove_mpcc_from_secondary) + mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, remove_mpcc); + + /* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */ + remove_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id); + /* remove MPCC if being used */ + + if (remove_mpcc != NULL) + mpc->funcs->remove_mpcc(mpc, mpc_tree_params, remove_mpcc); + else + if (dc->debug.sanity_checks) + mpc->funcs->assert_mpcc_idle_before_connect( + dc->res_pool->mpc, mpcc_id); + + /* Call MPC to insert new plane */ + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc, + mpc_tree_params, + &blnd_cfg, + NULL, + NULL, + dpp_id, + mpcc_id); + + ASSERT(new_mpcc != NULL); + hubp->opp_id = pipe_ctx->stream_res.opp->inst; + hubp->mpcc_id = mpcc_id; +} + +void dcn201_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock) +{ + struct dce_hwseq *hws = dc->hwseq; + struct hubp *hubp = NULL; + hubp = dc->res_pool->hubps[pipe->pipe_idx]; + /* use TG master update lock to lock everything on the TG + * therefore only top pipe need to lock + */ + if (pipe->top_pipe) + return; + + if (dc->debug.sanity_checks) + hws->funcs.verify_allow_pstate_change_high(dc); + + if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) { + if (lock) + pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg); + else + pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg); + } else { + if (lock) + pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg); + else + pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg); + } + + if (dc->debug.sanity_checks) + hws->funcs.verify_allow_pstate_change_high(dc); +} + +void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx) +{ + struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes; + + gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq, &attributes->address); + + pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes( + pipe_ctx->plane_res.hubp, attributes); + pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes( + pipe_ctx->plane_res.dpp, attributes); +} + +void 
dcn201_set_dmdata_attributes(struct pipe_ctx *pipe_ctx) +{ + struct dc_dmdata_attributes attr = { 0 }; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + + gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq, + &pipe_ctx->stream->dmdata_address); + + attr.dmdata_mode = DMDATA_HW_MODE; + attr.dmdata_size = + dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36; + attr.address.quad_part = + pipe_ctx->stream->dmdata_address.quad_part; + attr.dmdata_dl_delta = 0; + attr.dmdata_qos_mode = 0; + attr.dmdata_qos_level = 0; + attr.dmdata_repeat = 1; /* always repeat */ + attr.dmdata_updated = 1; + attr.dmdata_sw_data = NULL; + + hubp->funcs->dmdata_set_attributes(hubp, &attr); +} + +void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings) +{ + struct encoder_unblank_param params = { { 0 } }; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct dce_hwseq *hws = link->dc->hwseq; + + /* only 3 items below are used by unblank */ + params.timing = pipe_ctx->stream->timing; + + params.link_settings.link_rate = link_settings->link_rate; + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) { + /*check whether it is half the rate*/ + if (optc201_is_two_pixels_per_containter(&stream->timing)) + params.timing.pix_clk_100hz /= 2; + + pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms); + } + + if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { + hws->funcs.edp_backlight_control(link, true); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h new file mode 100644 index 000000000000..26cd62be6418 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h @@ -0,0 +1,46 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
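/*
 * Worked example for read_mmhub_vm_setup() and gpu_addr_to_uma() in
 * dcn201_hwseq.c above: MC_VM_FB_LOCATION_* hold address bits 47:24 in
 * their low 24 bits, hence the <<= 24 (16 MiB units), and addresses
 * inside [fb_base, fb_top) are rebased onto fb_offset. With made-up
 * register values:
 *
 *   fb_base reg = 0x00F0   ->  fb_base = 0xF0000000ULL
 *   GPU addr 0xF0001000 falls in the window, so the UMA address is
 *   0xF0001000 - 0xF0000000 + fb_offset  ==  fb_offset + 0x1000
 */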
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN201_H__ +#define __DC_HWSS_DCN201_H__ + +#include "hw_sequencer_private.h" + +void dcn201_set_dmdata_attributes(struct pipe_ctx *pipe_ctx); +void dcn201_init_hw(struct dc *dc); +void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx, + struct dc_link_settings *link_settings); +void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx); +void dcn201_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn201_init_blank( + struct dc *dc, + struct timing_generator *tg); +#endif /* __DC_HWSS_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c new file mode 100644 index 000000000000..f1f89f93603f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c @@ -0,0 +1,131 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dce110/dce110_hw_sequencer.h" +#include "dcn10/dcn10_hw_sequencer.h" +#include "dcn20/dcn20_hwseq.h" +#include "dcn201_hwseq.h" + +static const struct hw_sequencer_funcs dcn201_funcs = { + .program_gamut_remap = dcn10_program_gamut_remap, + .init_hw = dcn201_init_hw, + .power_down_on_boot = NULL, + .apply_ctx_to_hw = dce110_apply_ctx_to_hw, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, + .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, + .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, + .update_plane_addr = dcn201_update_plane_addr, + .update_dchub = dcn10_update_dchub, + .update_pending_status = dcn10_update_pending_status, + .program_output_csc = dcn20_program_output_csc, + .enable_accelerated_mode = dce110_enable_accelerated_mode, + .enable_timing_synchronization = dcn10_enable_timing_synchronization, + .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset, + .update_info_frame = dce110_update_info_frame, + .send_immediate_sdp_message = dcn10_send_immediate_sdp_message, + .enable_stream = dce110_enable_stream, + .disable_stream = dce110_disable_stream, + .unblank_stream = dcn201_unblank_stream, + .blank_stream = dce110_blank_stream, + .enable_audio_stream = dce110_enable_audio_stream, + .disable_audio_stream = dce110_disable_audio_stream, + .disable_plane = dcn10_disable_plane, + .pipe_control_lock = dcn201_pipe_control_lock, + .interdependent_update_lock = dcn10_lock_all_pipes, + .cursor_lock = dcn10_cursor_lock, + .prepare_bandwidth = dcn20_prepare_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, + .update_bandwidth = dcn20_update_bandwidth, + .set_drr = dcn10_set_drr, + .get_position = dcn10_get_position, + .set_static_screen_control = dcn10_set_static_screen_control, + .setup_stereo = dcn10_setup_stereo, + .set_avmute = dce110_set_avmute, + .log_hw_state = dcn10_log_hw_state, + .get_hw_state = dcn10_get_hw_state, + .clear_status_bits = dcn10_clear_status_bits, + .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, + .edp_backlight_control = dce110_edp_backlight_control, + .edp_power_control = dce110_edp_power_control, + .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, + .program_triplebuffer = dcn20_program_triple_buffer, + .dmdata_status_done = dcn20_dmdata_status_done, + .set_dmdata_attributes = dcn201_set_dmdata_attributes, + .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, + .calc_vupdate_position = dcn10_calc_vupdate_position, + .set_cursor_position = dcn10_set_cursor_position, + .set_cursor_attribute = dcn201_set_cursor_attribute, + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .set_backlight_level = dce110_set_backlight_level, + .set_abm_immediate_disable = dce110_set_abm_immediate_disable, + .set_pipe = dce110_set_pipe, + .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, + .update_visual_confirm_color = dcn20_update_visual_confirm_color, +}; + +static const struct hwseq_private_funcs dcn201_private_funcs = { + .init_pipes = NULL, + .update_plane_addr = dcn201_update_plane_addr, + .plane_atomic_disconnect = dcn201_plane_atomic_disconnect, + .program_pipe = dcn10_program_pipe, + .update_mpcc = dcn201_update_mpcc, + .set_input_transfer_func = dcn20_set_input_transfer_func, + .set_output_transfer_func = 
dcn20_set_output_transfer_func, + .power_down = dce110_power_down, + .enable_display_power_gating = dcn10_dummy_display_power_gating, + .blank_pixel_data = dcn20_blank_pixel_data, + .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap, + .enable_stream_timing = dcn20_enable_stream_timing, + .edp_backlight_control = dce110_edp_backlight_control, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .did_underflow_occur = dcn10_did_underflow_occur, + .init_blank = dcn201_init_blank, + .disable_vga = dcn10_disable_vga, + .bios_golden_init = dcn10_bios_golden_init, + .plane_atomic_disable = dcn10_plane_atomic_disable, + .plane_atomic_power_down = dcn10_plane_atomic_power_down, + .enable_power_gating_plane = dcn10_enable_power_gating_plane, + .dpp_pg_control = dcn10_dpp_pg_control, + .hubp_pg_control = dcn10_hubp_pg_control, + .dsc_pg_control = NULL, + .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .wait_for_blank_complete = dcn20_wait_for_blank_complete, + .dccg_init = dcn20_dccg_init, + .set_blend_lut = dcn20_set_blend_lut, + .set_shaper_3dlut = dcn20_set_shaper_3dlut, +}; + +void dcn201_hw_sequencer_construct(struct dc *dc) +{ + dc->hwss = dcn201_funcs; + dc->hwseq->funcs = dcn201_private_funcs; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h new file mode 100644 index 000000000000..1168887b033d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h @@ -0,0 +1,33 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_DCN201_INIT_H__ +#define __DC_DCN201_INIT_H__ + +struct dc; + +void dcn201_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN201_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c new file mode 100644 index 000000000000..a65e8f7801db --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c @@ -0,0 +1,209 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" + +#include "core_types.h" +#include "link_encoder.h" +#include "dcn201_link_encoder.h" +#include "stream_encoder.h" +#include "i2caux_interface.h" +#include "dc_bios_types.h" + +#include "gpio_service_interface.h" + +#define CTX \ + enc10->base.ctx + +#define DC_LOGGER \ + enc10->base.ctx->logger + +#define REG(reg)\ + (enc10->link_regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc10->link_shift->field_name, enc10->link_mask->field_name + +#define IND_REG(index) \ + (enc10->link_regs->index) + +void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + uint32_t value1, value2; + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + dcn10_link_encoder_get_max_link_cap(enc, link_settings); + REG_GET_2(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value1, + RDPCS_PHY_DPALT_DP4, &value2); + /*limit to combo_phy*/ + if (enc->usbc_combo_phy) { + if (!value1 && !value2 && link_settings->lane_count > LANE_COUNT_TWO) + link_settings->lane_count = LANE_COUNT_TWO; + } +} + +bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + uint32_t value; + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + REG_GET(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value); + + // if value == 1 alt mode is disabled, otherwise it is enabled + return !value; +} + +static const struct link_encoder_funcs dcn201_link_enc_funcs = { + .read_state = link_enc2_read_state, + .validate_output_with_stream = + dcn10_link_encoder_validate_output_with_stream, + .hw_init = enc2_hw_init, + .setup = dcn10_link_encoder_setup, + .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, + .enable_dp_output = dcn10_link_encoder_enable_dp_output, + .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output, + .disable_output = dcn10_link_encoder_disable_output, + .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = + dcn10_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = + dcn10_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, + .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dcn10_link_encoder_enable_hpd, + .disable_hpd = dcn10_link_encoder_disable_hpd, + 
.is_dig_enabled = dcn10_is_dig_enabled, + .destroy = dcn10_link_encoder_destroy, + .fec_set_enable = enc2_fec_set_enable, + .fec_set_ready = enc2_fec_set_ready, + .get_dig_frontend = dcn10_get_dig_frontend, + .fec_is_active = enc2_fec_is_active, + .is_in_alt_mode = dcn201_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn201_link_encoder_get_max_link_cap, +}; + +void dcn201_link_encoder_construct( + struct dcn20_link_encoder *enc20, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + struct dcn10_link_encoder *enc10 = &enc20->enc10; + + enc10->base.funcs = &dcn201_link_enc_funcs; + enc10->base.ctx = init_data->ctx; + enc10->base.id = init_data->encoder; + + enc10->base.hpd_source = init_data->hpd_source; + enc10->base.connector = init_data->connector; + + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; + + enc10->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether the driver polls the I2C data pin + * while doing the DP sink detect + */ + + /* if (dal_adapter_service_is_feature_supported(as, + * FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + * enc10->base.features.flags.bits. + * DP_SINK_DETECT_POLL_DATA_PIN = true; + */ + + enc10->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assigns DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assigns DIG_FE to non-MST UNIPHY first and MST last, so the + * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI and LVDS. + * The preferred DIG assignment is decided by board design. + * For DCE 8.0 there are at most 6 UNIPHYs; we assume the board design + * and VBIOS will filter out the 7th UNIPHY for DCE 8.0. + * Given this, adding DIGG should not hurt DCE 8.0.
+ * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc10->link_regs = link_regs; + enc10->aux_regs = aux_regs; + enc10->hpd_regs = hpd_regs; + enc10->link_shift = link_shift; + enc10->link_mask = link_mask; + + switch (enc10->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc10->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc10->base.preferred_engine = ENGINE_ID_DIGB; + break; + default: + ASSERT_CRITICAL(false); + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, + enc10->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (result == BP_RESULT_OK) { + enc10->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc10->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.DP_IS_USB_C = + bp_cap_info.DP_IS_USB_C; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } + if (enc10->base.ctx->dc->debug.hdmi20_disable) { + enc10->base.features.flags.bits.HDMI_6GB_EN = 0; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h new file mode 100644 index 000000000000..8b95ef251332 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h @@ -0,0 +1,59 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_ENCODER__DCN201_H__ +#define __DC_LINK_ENCODER__DCN201_H__ + +#include "dcn20/dcn20_link_encoder.h" + +#define DPCS_DCN201_MASK_SH_LIST(mask_sh)\ + DPCS_MASK_SH_LIST(mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_RATE, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh) + +#define DPCS_DCN201_REG_LIST(id) \ + DPCS_DCN2_CMN_REG_LIST(id) + +void dcn201_link_encoder_construct( + struct dcn20_link_encoder *enc20, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask); + +#endif /* __DC_LINK_ENCODER__DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c new file mode 100644 index 000000000000..95c4c55f067c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c @@ -0,0 +1,125 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn201_mpc.h" + +#define REG(reg)\ + mpc201->mpc_regs->reg + +#define CTX \ + mpc201->base.ctx + +#define DC_LOGGER \ + mpc201->base.ctx->logger + +#undef FN +#define FN(reg_name, field_name) \ + mpc201->mpc_shift->field_name, mpc201->mpc_mask->field_name + +static void mpc201_set_out_rate_control( + struct mpc *mpc, + int opp_id, + bool enable, + bool rate_2x_mode, + struct mpc_dwb_flow_control *flow_control) +{ + struct dcn201_mpc *mpc201 = TO_DCN201_MPC(mpc); + + REG_UPDATE_2(MUX[opp_id], + MPC_OUT_RATE_CONTROL_DISABLE, !enable, + MPC_OUT_RATE_CONTROL, rate_2x_mode); + + if (flow_control) + REG_UPDATE_3(MUX[opp_id], + MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode, + MPC_OUT_FLOW_CONTROL_COUNT0, flow_control->flow_ctrl_cnt0, + MPC_OUT_FLOW_CONTROL_COUNT1, flow_control->flow_ctrl_cnt1); +} + +static void mpc201_init_mpcc(struct mpcc *mpcc, int mpcc_inst) +{ + mpcc->mpcc_id = mpcc_inst; + mpcc->dpp_id = 0xf; + mpcc->mpcc_bot = NULL; + mpcc->blnd_cfg.overlap_only = false; + mpcc->blnd_cfg.global_alpha = 0xff; + mpcc->blnd_cfg.global_gain = 0xff; + mpcc->blnd_cfg.background_color_bpc = 4; + mpcc->blnd_cfg.bottom_gain_mode = 0; + mpcc->blnd_cfg.top_gain = 0x1f000; + mpcc->blnd_cfg.bottom_inside_gain = 0x1f000; + mpcc->blnd_cfg.bottom_outside_gain = 0x1f000; + mpcc->sm_cfg.enable = false; + mpcc->shared_bottom = false; +} + +const struct mpc_funcs dcn201_mpc_funcs = { + .read_mpcc_state = mpc1_read_mpcc_state, + .insert_plane = mpc1_insert_plane, + .remove_mpcc = mpc1_remove_mpcc, + .mpc_init = mpc1_mpc_init, + .mpc_init_single_inst = mpc1_mpc_init_single_inst, + .update_blending = mpc2_update_blending, + .cursor_lock = mpc1_cursor_lock, + .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, + .get_mpcc_for_dpp_from_secondary = NULL, + .wait_for_idle = mpc2_assert_idle_mpcc, + .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect, + .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, + .set_denorm = mpc2_set_denorm, + .set_denorm_clamp = mpc2_set_denorm_clamp, + .set_output_csc = mpc2_set_output_csc, + .set_ocsc_default = mpc2_set_ocsc_default, + .set_output_gamma = mpc2_set_output_gamma, + .set_out_rate_control = mpc201_set_out_rate_control, + .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut, + .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .set_bg_color = mpc1_set_bg_color, +}; + +void dcn201_mpc_construct(struct dcn201_mpc *mpc201, + struct dc_context *ctx, + const struct dcn201_mpc_registers *mpc_regs, + const struct dcn201_mpc_shift *mpc_shift, + const struct dcn201_mpc_mask *mpc_mask, + int num_mpcc) +{ + int i; + + mpc201->base.ctx = ctx; + + mpc201->base.funcs = &dcn201_mpc_funcs; + + mpc201->mpc_regs = mpc_regs; + mpc201->mpc_shift = mpc_shift; + mpc201->mpc_mask = mpc_mask; + + mpc201->mpcc_in_use_mask = 0; + mpc201->num_mpcc = num_mpcc; + + for (i = 0; i < MAX_MPCC; i++) + mpc201_init_mpcc(&mpc201->base.mpcc_array[i], i); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h new file mode 100644 index 000000000000..b9ce0c1ba5c7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h @@ -0,0 +1,86 @@ +/* Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_MPCC_DCN201_H__ +#define __DC_MPCC_DCN201_H__ + +#include "dcn20/dcn20_mpc.h" + +#define TO_DCN201_MPC(mpc_base) \ + container_of(mpc_base, struct dcn201_mpc, base) + +#define MPC_REG_LIST_DCN201(inst) \ + MPC_REG_LIST_DCN2_0(inst) + +#define MPC_OUT_MUX_REG_LIST_DCN201(inst) \ + MPC_OUT_MUX_REG_LIST_DCN2_0(inst) + +#define MPC_REG_VARIABLE_LIST_DCN201 \ + MPC_REG_VARIABLE_LIST_DCN2_0 + +#define MPC_COMMON_MASK_SH_LIST_DCN201(mask_sh) \ + MPC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT0, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT1, mask_sh) + +#define MPC_REG_FIELD_LIST_DCN201(type) \ + MPC_REG_FIELD_LIST_DCN2_0(type) \ + type MPC_OUT_RATE_CONTROL;\ + type MPC_OUT_RATE_CONTROL_DISABLE;\ + type MPC_OUT_FLOW_CONTROL_MODE;\ + type MPC_OUT_FLOW_CONTROL_COUNT0;\ + type MPC_OUT_FLOW_CONTROL_COUNT1; + +struct dcn201_mpc_registers { + MPC_REG_VARIABLE_LIST_DCN201 +}; + +struct dcn201_mpc_shift { + MPC_REG_FIELD_LIST_DCN201(uint8_t) +}; + +struct dcn201_mpc_mask { + MPC_REG_FIELD_LIST_DCN201(uint32_t) +}; + +struct dcn201_mpc { + struct mpc base; + int mpcc_in_use_mask; + int num_mpcc; + const struct dcn201_mpc_registers *mpc_regs; + const struct dcn201_mpc_shift *mpc_shift; + const struct dcn201_mpc_mask *mpc_mask; +}; + +void dcn201_mpc_construct(struct dcn201_mpc *mpc201, + struct dc_context *ctx, + const struct dcn201_mpc_registers *mpc_regs, + const struct dcn201_mpc_shift *mpc_shift, + const struct dcn201_mpc_mask *mpc_mask, + int num_mpcc); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c new file mode 100644 index 000000000000..8e77db46a409 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c @@ -0,0 +1,72 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dcn201_opp.h" +#include "reg_helper.h" + +#define REG(reg) \ + (oppn201->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + oppn201->opp_shift->field_name, oppn201->opp_mask->field_name + +#define CTX \ + oppn201->base.ctx + +/*****************************************/ +/* Constructor, Destructor */ +/*****************************************/ + +static struct opp_funcs dcn201_opp_funcs = { + .opp_set_dyn_expansion = opp1_set_dyn_expansion, + .opp_program_fmt = opp1_program_fmt, + .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction, + .opp_program_stereo = opp1_program_stereo, + .opp_pipe_clock_control = opp1_pipe_clock_control, + .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator, + .opp_program_dpg_dimensions = opp2_program_dpg_dimensions, + .dpg_is_blanked = opp2_dpg_is_blanked, + .opp_dpg_set_blank_color = opp2_dpg_set_blank_color, + .opp_destroy = opp1_destroy, + .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, +}; + +void dcn201_opp_construct(struct dcn201_opp *oppn201, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_opp_registers *regs, + const struct dcn201_opp_shift *opp_shift, + const struct dcn201_opp_mask *opp_mask) +{ + oppn201->base.ctx = ctx; + oppn201->base.inst = inst; + oppn201->base.funcs = &dcn201_opp_funcs; + + oppn201->regs = regs; + oppn201->opp_shift = opp_shift; + oppn201->opp_mask = opp_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h new file mode 100644 index 000000000000..aca389ec1779 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h @@ -0,0 +1,74 @@ +/* Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPP_DCN201_H__ +#define __DC_OPP_DCN201_H__ + +#include "dcn20/dcn20_opp.h" + +#define TO_DCN201_OPP(opp)\ + container_of(opp, struct dcn201_opp, base) + +#define OPP_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define OPP_REG_LIST_DCN201(id) \ + OPP_REG_LIST_DCN10(id), \ + OPP_DPG_REG_LIST(id), \ + SRI(FMT_422_CONTROL, FMT, id) + +#define OPP_MASK_SH_LIST_DCN201(mask_sh) \ + OPP_MASK_SH_LIST_DCN20(mask_sh) + +#define OPP_DCN201_REG_FIELD_LIST(type) \ + OPP_DCN20_REG_FIELD_LIST(type); + +struct dcn201_opp_shift { + OPP_DCN201_REG_FIELD_LIST(uint8_t); +}; + +struct dcn201_opp_mask { + OPP_DCN201_REG_FIELD_LIST(uint32_t); +}; + +struct dcn201_opp_registers { + OPP_REG_VARIABLE_LIST_DCN2_0; +}; + +struct dcn201_opp { + struct output_pixel_processor base; + const struct dcn201_opp_registers *regs; + const struct dcn201_opp_shift *opp_shift; + const struct dcn201_opp_mask *opp_mask; + bool is_write_to_ram_a_safe; +}; + +void dcn201_opp_construct(struct dcn201_opp *oppn201, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_opp_registers *regs, + const struct dcn201_opp_shift *opp_shift, + const struct dcn201_opp_mask *opp_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c new file mode 100644 index 000000000000..730875dfd8b4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c @@ -0,0 +1,203 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * Authors: AMD + * + */ + +#include "reg_helper.h" +#include "dcn201_optc.h" +#include "dcn10/dcn10_optc.h" +#include "dc.h" + +#define REG(reg)\ + optc1->tg_regs->reg + +#define CTX \ + optc1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + optc1->tg_shift->field_name, optc1->tg_mask->field_name + +/* TEMP: need to figure out the inheritance model here. */ +bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) +{ + return optc1_is_two_pixels_per_containter(timing); +} + +static void optc201_triplebuffer_lock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_GLOBAL_CONTROL0, 0, + OTG_MASTER_UPDATE_LOCK_SEL, optc->inst); + REG_SET(OTG_VUPDATE_KEEPOUT, 0, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 1); + + if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS) + REG_WAIT(OTG_MASTER_UPDATE_LOCK, + UPDATE_LOCK_STATUS, 1, + 1, 10); +} + +static void optc201_triplebuffer_unlock(struct timing_generator *optc) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_SET(OTG_MASTER_UPDATE_LOCK, 0, + OTG_MASTER_UPDATE_LOCK, 0); + REG_SET(OTG_VUPDATE_KEEPOUT, 0, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0); +} + +static bool optc201_validate_timing( + struct timing_generator *optc, + const struct dc_crtc_timing *timing) +{ + uint32_t v_blank; + uint32_t h_blank; + uint32_t min_v_blank; + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + ASSERT(timing != NULL); + + v_blank = (timing->v_total - timing->v_addressable - + timing->v_border_top - timing->v_border_bottom); + + h_blank = (timing->h_total - timing->h_addressable - + timing->h_border_right - + timing->h_border_left); + + if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE && + timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING && + timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM && + timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE && + timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE && + timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) + return false; + + /* Check the maximum number of pixels supported by the timing generator. + * (Currently this can never fail: failing would require a display with + * more than 8192 total pixels horizontally and more than 8192 vertically.) + */ + if (timing->h_total > optc1->max_h_total || + timing->v_total > optc1->max_v_total) + return false; + + if (h_blank < optc1->min_h_blank) + return false; + + if (timing->h_sync_width < optc1->min_h_sync_width || + timing->v_sync_width < optc1->min_v_sync_width) + return false; + + min_v_blank = timing->flags.INTERLACE ? optc1->min_v_blank_interlace : optc1->min_v_blank; + + if (v_blank < min_v_blank) + return false; + + return true; +} + +static void optc201_get_optc_source(struct timing_generator *optc, + uint32_t *num_of_src_opp, + uint32_t *src_opp_id_0, + uint32_t *src_opp_id_1) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OPTC_DATA_SOURCE_SELECT, + OPTC_SEG0_SRC_SEL, src_opp_id_0); + + *num_of_src_opp = 1; +} +
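/* Editorial aside, not part of the patch: REG_SET, REG_GET, REG_UPDATE and
 * REG_WAIT above all reduce to shift/mask arithmetic on a 32-bit register
 * image, with the per-field shift and mask pair supplied through the FN()
 * macro. A hypothetical flattened version (field_set()/field_get() are not
 * kernel functions):
 */
#include <stdint.h>

/* Write `value` into the field described by (mask, shift). */
static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t value)
{
	return (reg & ~mask) | ((value << shift) & mask);
}

/* Read the field described by (mask, shift) out of `reg`. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}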
static struct timing_generator_funcs dcn201_tg_funcs = { + .validate_timing = optc201_validate_timing, + .program_timing = optc1_program_timing, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, + .program_global_sync = optc1_program_global_sync, + .enable_crtc = optc2_enable_crtc, + .disable_crtc = optc1_disable_crtc, + /* used by enable_timing_synchronization; not needed for FPGA */ + .is_counter_moving = optc1_is_counter_moving, + .get_position = optc1_get_position, + .get_frame_count = optc1_get_vblank_counter, + .get_scanoutpos = optc1_get_crtc_scanoutpos, + .get_otg_active_size = optc1_get_otg_active_size, + .set_early_control = optc1_set_early_control, + /* used by enable_timing_synchronization; not needed for FPGA */ + .wait_for_state = optc1_wait_for_state, + .set_blank = optc1_set_blank, + .is_blanked = optc1_is_blanked, + .set_blank_color = optc1_program_blank_color, + .did_triggered_reset_occur = optc1_did_triggered_reset_occur, + .enable_reset_trigger = optc1_enable_reset_trigger, + .enable_crtc_reset = optc1_enable_crtc_reset, + .disable_reset_trigger = optc1_disable_reset_trigger, + .triplebuffer_lock = optc201_triplebuffer_lock, + .triplebuffer_unlock = optc201_triplebuffer_unlock, + .lock = optc1_lock, + .unlock = optc1_unlock, + .enable_optc_clock = optc1_enable_optc_clock, + .set_drr = optc1_set_drr, + .get_last_used_drr_vtotal = NULL, + .set_vtotal_min_max = optc1_set_vtotal_min_max, + .set_static_screen_control = optc1_set_static_screen_control, + .program_stereo = optc1_program_stereo, + .is_stereo_left_eye = optc1_is_stereo_left_eye, + .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer, + .tg_init = optc1_tg_init, + .is_tg_enabled = optc1_is_tg_enabled, + .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred, + .clear_optc_underflow = optc1_clear_optc_underflow, + .get_crc = optc1_get_crc, + .configure_crc = optc2_configure_crc, + .set_dsc_config = optc2_set_dsc_config, + .set_dwb_source = NULL, + .get_optc_source = optc201_get_optc_source, + .set_vtg_params = optc1_set_vtg_params, + .program_manual_trigger = optc2_program_manual_trigger, + .setup_manual_trigger = optc2_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, +}; + +void dcn201_timing_generator_init(struct optc *optc1) +{ + optc1->base.funcs = &dcn201_tg_funcs; + + optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1; + optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1; + + optc1->min_h_blank = 32; + optc1->min_v_blank = 3; + optc1->min_v_blank_interlace = 5; + optc1->min_h_sync_width = 8; + optc1->min_v_sync_width = 1; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h new file mode 100644 index 000000000000..e9545b73513a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h @@ -0,0 +1,74 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_OPTC_DCN201_H__ +#define __DC_OPTC_DCN201_H__ + +#include "dcn20/dcn20_optc.h" + +#define TG_COMMON_REG_LIST_DCN201(inst) \ + TG_COMMON_REG_LIST_DCN(inst),\ + SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\ + SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\ + SRI(OTG_GSL_WINDOW_X, OTG, inst),\ + SRI(OTG_GSL_WINDOW_Y, OTG, inst),\ + SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\ + SRI(OTG_DSC_START_POSITION, OTG, inst),\ + SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\ + SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\ + SRI(OPTC_WIDTH_CONTROL, ODM, inst),\ + SR(DWB_SOURCE_SELECT) + +#define TG_COMMON_MASK_SH_LIST_DCN201(mask_sh)\ + TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\ + SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\ + SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\ + SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \ + SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \ + SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \ + SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\ + SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\ + SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\ + SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\ + SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh),\ + SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh) + +void dcn201_timing_generator_init(struct optc *optc); + +bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c new file mode 100644 index 000000000000..0fa381088d1d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -0,0 +1,1307 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dc.h" + +#include "dcn201_init.h" +#include "dml/dcn20/dcn20_fpu.h" +#include "resource.h" +#include "include/irq_service_interface.h" +#include "dcn201_resource.h" + +#include "dcn20/dcn20_resource.h" + +#include "dcn10/dcn10_hubp.h" +#include "dcn10/dcn10_ipp.h" +#include "dcn201_mpc.h" +#include "dcn201_hubp.h" +#include "irq/dcn201/irq_service_dcn201.h" +#include "dcn201/dcn201_dpp.h" +#include "dcn201/dcn201_hubbub.h" +#include "dcn201_dccg.h" +#include "dcn201_optc.h" +#include "dcn201_hwseq.h" +#include "dce110/dce110_hw_sequencer.h" +#include "dcn201_opp.h" +#include "dcn201/dcn201_link_encoder.h" +#include "dcn20/dcn20_stream_encoder.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_audio.h" +#include "dce/dce_hwseq.h" +#include "virtual/virtual_stream_encoder.h" +#include "dce110/dce110_resource.h" +#include "dce/dce_aux.h" +#include "dce/dce_i2c.h" +#include "dcn201_hubbub.h" +#include "dcn10/dcn10_resource.h" + +#include "cyan_skillfish_ip_offset.h" + +#include "dcn/dcn_2_0_3_offset.h" +#include "dcn/dcn_2_0_3_sh_mask.h" +#include "dpcs/dpcs_2_0_3_offset.h" +#include "dpcs/dpcs_2_0_3_sh_mask.h" + +#include "mmhub/mmhub_2_0_0_offset.h" +#include "mmhub/mmhub_2_0_0_sh_mask.h" +#include "nbio/nbio_7_4_offset.h" + +#include "reg_helper.h" + +#define MIN_DISP_CLK_KHZ 100000 +#define MIN_DPP_CLK_KHZ 100000 + +struct _vcs_dpi_ip_params_st dcn201_ip = { + .gpuvm_enable = 0, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .rob_buffer_size_kbytes = 168, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs_luma = 84, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, + .line_buffer_fixed_bpp = 0, + .dcc_supported = true, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 12, + 
.writeback_max_vscl_taps = 12, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 9600, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 2, + .max_num_dpp = 4, + .max_num_wb = 0, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 8, + .max_vscl_ratio = 8, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 30, + .dppclk_delay_subtotal = 77, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 8, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 87, + .dcfclk_cstate_latency = 10, + .max_inter_dcn_tile_repeaters = 8, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn201_soc = { + .clock_limits = { + { + .state = 0, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 200.0, + .dispclk_mhz = 300.0, + .dppclk_mhz = 300.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 1, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 250.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 3600.0, + }, + { + .state = 2, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 750.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 6800.0, + }, + { + .state = 3, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 250.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 4, + .dscclk_mhz = 400.0, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 750.0, + .dispclk_mhz = 1200.0, + .dppclk_mhz = 1200.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1254.0, + .dram_speed_mts = 14000.0, + } + }, + .num_states = 4, + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 256, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 256, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 256, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 80.0, + .max_avg_sdp_bw_use_normal_percent = 80.0, + .max_avg_dram_bw_use_normal_percent = 69.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 80.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.3, + .downspread_percent = 0.3, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 128, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 16, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 250.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3000, + .use_urgent_burst_bw = 0, 
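/* Editorial note, not part of the patch: each clock_limits[] entry above is
 * one DPM state, ordered from the lowest-power state upward. Mode validation
 * walks the table and settles on the lowest state whose clocks and DRAM
 * bandwidth still fit the mode, roughly (sketch, with a hypothetical fits()
 * predicate):
 *
 *	for (i = 0; i < soc->num_states; i++)
 *		if (fits(&soc->clock_limits[i], mode))
 *			return i;
 *
 * so higher-index states trade power for bandwidth headroom.
 */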
+}; + +enum dcn20_clk_src_array_id { + DCN20_CLK_SRC_PLL0, + DCN20_CLK_SRC_PLL1, + DCN20_CLK_SRC_TOTAL_DCN201 +}; + +/* begin ********************* + * macros to expend register list macro defined in HW object header file */ + +/* DCN */ + +#undef BASE_INNER +#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRIR(var_name, reg_name, block, id)\ + .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define SRI_IX(reg_name, block, id)\ + .reg_name = ix ## block ## id ## _ ## reg_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \ + mmMM ## reg_name + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +#define clk_src_regs(index, pllid)\ +[index] = {\ + CS_COMMON_REG_LIST_DCN201(index, pllid),\ +} + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +#define audio_regs(id)\ +[id] = {\ + AUD_COMMON_REG_LIST(id)\ +} + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +#define stream_enc_regs(id)\ +[id] = {\ + SE_DCN2_REG_LIST(id)\ +} + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN20(_MASK) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +#define 
aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), +}; + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN_COMMON_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B), +}; + +#define LINK_ENCODER_MASK_SH_LIST_DCN201(mask_sh)\ + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh) + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN201(_MASK) +}; + +#define ipp_regs(id)\ +[id] = {\ + IPP_REG_LIST_DCN201(id),\ +} + +static const struct dcn10_ipp_registers ipp_regs[] = { + ipp_regs(0), + ipp_regs(1), + ipp_regs(2), + ipp_regs(3), +}; + +static const struct dcn10_ipp_shift ipp_shift = { + IPP_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn10_ipp_mask ipp_mask = { + IPP_MASK_SH_LIST_DCN201(_MASK) +}; + +#define opp_regs(id)\ +[id] = {\ + OPP_REG_LIST_DCN201(id),\ +} + +static const struct dcn201_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1), +}; + +static const struct dcn201_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn201_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN201(_MASK) +}; + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUX_RESET_MASK = 0 \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1) +}; + +#define tf_regs(id)\ +[id] = {\ + TF_REG_LIST_DCN201(id),\ +} + +static const struct dcn201_dpp_registers tf_regs[] = { + tf_regs(0), + tf_regs(1), + tf_regs(2), + tf_regs(3), +}; + +static const struct dcn201_dpp_shift tf_shift = { + TF_REG_LIST_SH_MASK_DCN201(__SHIFT) +}; + +static const struct dcn201_dpp_mask tf_mask = { + TF_REG_LIST_SH_MASK_DCN201(_MASK) +}; + +static const struct dcn201_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN201(0), + MPC_REG_LIST_DCN201(1), + MPC_REG_LIST_DCN201(2), + MPC_REG_LIST_DCN201(3), + MPC_REG_LIST_DCN201(4), + MPC_OUT_MUX_REG_LIST_DCN201(0), + MPC_OUT_MUX_REG_LIST_DCN201(1), +}; + +static const struct dcn201_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn201_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN201(_MASK) +}; + +#define tg_regs_dcn201(id)\ +[id] = {TG_COMMON_REG_LIST_DCN201(id)} + +static const struct dcn_optc_registers tg_regs[] = { + tg_regs_dcn201(0), + tg_regs_dcn201(1) +}; + +static const struct dcn_optc_shift tg_shift = { + TG_COMMON_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn_optc_mask tg_mask = { + TG_COMMON_MASK_SH_LIST_DCN201(_MASK) +}; + +#define hubp_regsDCN201(id)\ +[id] = {\ + HUBP_REG_LIST_DCN201(id)\ +} + +static const struct dcn201_hubp_registers hubp_regs[] = { + hubp_regsDCN201(0), + hubp_regsDCN201(1), + hubp_regsDCN201(2), + hubp_regsDCN201(3) +}; + +static const struct dcn201_hubp_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn201_hubp_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN201(_MASK) +}; + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN201(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + 
HUBBUB_MASK_SH_LIST_DCN201(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN201(_MASK) +}; + + +static const struct dccg_registers dccg_regs = { + DCCG_COMMON_REG_LIST_DCN_BASE() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(_MASK) +}; + +static const struct resource_caps res_cap_dnc201 = { + .num_timing_generator = 2, + .num_opp = 2, + .num_video_plane = 4, + .num_audio = 2, + .num_stream_encoder = 2, + .num_pll = 2, + .num_ddc = 2, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .blends_with_above = true, + .blends_with_below = true, + .per_pixel_alpha = true, + + .pixel_format_support = { + .argb8888 = true, + .nv12 = false, + .fp16 = true, + .p010 = false, + }, + + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 1 + }, + + .max_downscale_factor = { + .argb8888 = 250, + .nv12 = 250, + .fp16 = 250 + }, + 64, + 64 +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, + .pipe_split_policy = MPC_SPLIT_AVOID, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .az_endpoint_mute_only = true, + .max_downscale_src_width = 3840, + .disable_pplib_wm_range = true, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_tri_buf = false, +}; + +static void dcn201_dpp_destroy(struct dpp **dpp) +{ + kfree(TO_DCN201_DPP(*dpp)); + *dpp = NULL; +} + +static struct dpp *dcn201_dpp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn201_dpp *dpp = + kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC); + + if (!dpp) + return NULL; + + if (dpp201_construct(dpp, ctx, inst, + &tf_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + kfree(dpp); + return NULL; +} + +static struct input_pixel_processor *dcn201_ipp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn10_ipp *ipp = + kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + + if (!ipp) { + return NULL; + } + + dcn20_ipp_construct(ipp, ctx, inst, + &ipp_regs[inst], &ipp_shift, &ipp_mask); + return &ipp->base; +} + + +static struct output_pixel_processor *dcn201_opp_create( + struct dc_context *ctx, uint32_t inst) +{ + struct dcn201_opp *opp = + kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC); + + if (!opp) { + return NULL; + } + + dcn201_opp_construct(opp, ctx, inst, + &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +struct dce_aux *dcn201_aux_engine_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = + kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, + SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2), +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks 
= { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +struct dce_i2c_hw *dcn201_i2c_hw_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = + kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, + &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) +{ + struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), + GFP_ATOMIC); + + if (!mpc201) + return NULL; + + dcn201_mpc_construct(mpc201, ctx, + &mpc_regs, + &mpc_shift, + &mpc_mask, + num_mpcc); + + return &mpc201->base; +} + +static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx) +{ + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), + GFP_ATOMIC); + + if (!hubbub) + return NULL; + + hubbub201_construct(hubbub, ctx, + &hubbub_reg, + &hubbub_shift, + &hubbub_mask); + + return &hubbub->base; +} + +static struct timing_generator *dcn201_timing_generator_create( + struct dc_context *ctx, + uint32_t instance) +{ + struct optc *tgn10 = + kzalloc(sizeof(struct optc), GFP_ATOMIC); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &tg_regs[instance]; + tgn10->tg_shift = &tg_shift; + tgn10->tg_mask = &tg_mask; + + dcn201_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +struct link_encoder *dcn201_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = + kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); + struct dcn10_link_encoder *enc10 = &enc20->enc10; + + if (!enc20) + return NULL; + + dcn201_link_encoder_construct(enc20, + enc_init_data, + &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc10->base; +} + +struct clock_source *dcn201_clock_source_create( + struct dc_context *ctx, + struct dc_bios *bios, + enum clock_source_id id, + const struct dce110_clk_src_regs *regs, + bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = + kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + + if (!clk_src) + return NULL; + + if (dce112_clk_src_construct(clk_src, ctx, bios, id, + regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + kfree(clk_src); + return NULL; +} + +static void read_dce_straps( + struct dc_context *ctx, + struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static struct audio *dcn201_create_audio( + struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, + &audio_regs[inst], &audio_shift, &audio_mask); +} + +static struct stream_encoder *dcn201_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1 = + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC); + + if 
(!enc1) + return NULL; + + dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, + &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN201_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN201_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN201_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dcn201_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn201_create_audio, + .create_stream_encoder = dcn201_stream_encoder_create, + .create_hwseq = dcn201_hwseq_create, +}; + +static const struct resource_create_funcs res_create_maximus_funcs = { + .read_dce_straps = NULL, + .create_audio = NULL, + .create_stream_encoder = NULL, + .create_hwseq = dcn201_hwseq_create, +}; + +void dcn201_clock_source_destroy(struct clock_source **clk_src) +{ + kfree(TO_DCE110_CLK_SRC(*clk_src)); + *clk_src = NULL; +} + +static void dcn201_resource_destruct(struct dcn201_resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->base.stream_enc_count; i++) { + if (pool->base.stream_enc[i] != NULL) { + kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i])); + pool->base.stream_enc[i] = NULL; + } + } + + + if (pool->base.mpc != NULL) { + kfree(TO_DCN201_MPC(pool->base.mpc)); + pool->base.mpc = NULL; + } + + if (pool->base.hubbub != NULL) { + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; + } + + for (i = 0; i < pool->base.pipe_count; i++) { + if (pool->base.dpps[i] != NULL) + dcn201_dpp_destroy(&pool->base.dpps[i]); + + if (pool->base.ipps[i] != NULL) + pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]); + + if (pool->base.hubps[i] != NULL) { + kfree(TO_DCN10_HUBP(pool->base.hubps[i])); + pool->base.hubps[i] = NULL; + } + + if (pool->base.irqs != NULL) { + dal_irq_service_destroy(&pool->base.irqs); + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + if (pool->base.opps[i] != NULL) + pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]); + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + if (pool->base.timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); + pool->base.timing_generators[i] = NULL; + } + } + for (i = 0; i < pool->base.audio_count; i++) { + if (pool->base.audios[i]) + dce_aud_destroy(&pool->base.audios[i]); + } + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] != NULL) { + dcn201_clock_source_destroy(&pool->base.clock_sources[i]); + pool->base.clock_sources[i] = NULL; + } + } + + if (pool->base.dp_clock_source != NULL) { + dcn201_clock_source_destroy(&pool->base.dp_clock_source); + pool->base.dp_clock_source = NULL; + } + + if (pool->base.dccg != NULL) + dcn_dccg_destroy(&pool->base.dccg); +} + +static struct hubp *dcn201_hubp_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn201_hubp *hubp201 = + kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC); + + if (!hubp201) + return NULL; + + if (dcn201_hubp_construct(hubp201, ctx, inst, + &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp201->base; + + kfree(hubp201); + return NULL; +} + +static struct 
pipe_ctx *dcn201_acquire_idle_pipe_for_layer( + struct dc_state *context, + const struct resource_pool *pool, + struct dc_stream_state *stream) +{ + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); + struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe); + + if (!head_pipe) + ASSERT(0); + + if (!idle_pipe) + return NULL; + + idle_pipe->stream = head_pipe->stream; + idle_pipe->stream_res.tg = head_pipe->stream_res.tg; + idle_pipe->stream_res.opp = head_pipe->stream_res.opp; + + idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx]; + idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst; + + return idle_pipe; +} + +static bool dcn201_get_dcc_compression_cap(const struct dc *dc, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( + dc->res_pool->hubbub, + input, + output); +} + + +static void dcn201_destroy_resource_pool(struct resource_pool **pool) +{ + struct dcn201_resource_pool *dcn201_pool = TO_DCN201_RES_POOL(*pool); + + dcn201_resource_destruct(dcn201_pool); + kfree(dcn201_pool); + *pool = NULL; +} + +static void dcn201_link_init(struct dc_link *link) +{ + if (link->ctx->dc_bios->integrated_info) + link->dp_ss_off = !link->ctx->dc_bios->integrated_info->dp_ss_control; +} + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn201_get_dcc_compression_cap, +}; + +static struct resource_funcs dcn201_res_pool_funcs = { + .link_init = dcn201_link_init, + .destroy = dcn201_destroy_resource_pool, + .link_enc_create = dcn201_link_encoder_create, + .panel_cntl_create = NULL, + .validate_bandwidth = dcn20_validate_bandwidth, + .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, + .add_stream_to_ctx = dcn20_add_stream_to_ctx, + .add_dsc_to_stream_resource = NULL, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, + .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link +}; + +static bool dcn201_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dcn201_resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap_dnc201; + pool->base.funcs = &dcn201_res_pool_funcs; + + /************************************************* + * Resource + asic cap hardcoding * + *************************************************/ + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + pool->base.pipe_count = 4; + pool->base.mpcc_count = 5; + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; + + dc->caps.max_slave_planes = 1; + dc->caps.max_slave_yuv_planes = 1; + dc->caps.max_slave_rgb_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = 
true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 1; + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.dgam_rom_caps.pq = 0; + dc->caps.color.dpp.dgam_rom_caps.hlg = 0; + dc->caps.color.dpp.post_csc = 0; + dc->caps.color.dpp.gamma_corr = 0; + dc->caps.color.dpp.dgam_rom_for_yuv = 1; + + dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN2 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 0; + dc->caps.color.mpc.num_3dluts = 0; + dc->caps.color.mpc.shared_3d_lut = 0; + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + dc->debug = debug_defaults_drv; + + /*a0 only, remove later*/ + dc->work_arounds.no_connect_phy_config = true; + dc->work_arounds.dedcn20_305_wa = true; + /************************************************* + * Create resources * + *************************************************/ + + pool->base.clock_sources[DCN20_CLK_SRC_PLL0] = + dcn201_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL1] = + dcn201_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + + pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN201; + + /* todo: not reuse phy_pll registers */ + pool->base.dp_clock_source = + dcn201_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + goto create_fail; + } + } + + pool->base.dccg = dccg201_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->base.dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + goto create_fail; + } + + dcn201_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn201_ip.max_num_dpp = pool->base.pipe_count; + dml_init_instance(&dc->dml, &dcn201_soc, &dcn201_ip, DML_PROJECT_DCN201); + { + struct irq_service_init_data init_data; + init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dcn201_create(&init_data); + if (!pool->base.irqs) + goto create_fail; + } + + /* mem input -> ipp -> dpp -> opp -> TG */ + for (i = 0; i < pool->base.pipe_count; i++) { + pool->base.hubps[i] = dcn201_hubp_create(ctx, i); + if (pool->base.hubps[i] == NULL) { + dm_error( + "DC: failed to create memory input!\n"); + goto create_fail; + } + + pool->base.ipps[i] = dcn201_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + dm_error( + "DC: failed to create input pixel processor!\n"); + goto create_fail; + } + + pool->base.dpps[i] = dcn201_dpp_create(ctx, i); + if (pool->base.dpps[i] == NULL) { + dm_error( + "DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_opp; i++) { + pool->base.opps[i] = 
dcn201_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + pool->base.engines[i] = dcn201_aux_engine_create(ctx, i); + if (pool->base.engines[i] == NULL) { + dm_error( + "DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->base.hw_i2cs[i] = dcn201_i2c_hw_create(ctx, i); + if (pool->base.hw_i2cs[i] == NULL) { + dm_error( + "DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->base.sw_i2cs[i] = NULL; + } + + for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { + pool->base.timing_generators[i] = dcn201_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + + pool->base.timing_generator_count = i; + + pool->base.mpc = dcn201_mpc_create(ctx, pool->base.mpcc_count); + if (pool->base.mpc == NULL) { + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + pool->base.hubbub = dcn201_hubbub_create(ctx); + if (pool->base.hubbub == NULL) { + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + if (!resource_construct(num_virtual_links, dc, &pool->base, + (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? + &res_create_funcs : &res_create_maximus_funcs))) + goto create_fail; + + dcn201_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->base.pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + return true; + +create_fail: + + dcn201_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn201_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc) +{ + struct dcn201_resource_pool *pool = + kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC); + + if (!pool) + return NULL; + + if (dcn201_resource_construct(init_data->num_virtual_links, dc, pool)) + return &pool->base; + + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h new file mode 100644 index 000000000000..e0467d17d4ae --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h @@ -0,0 +1,50 @@ +/* +* Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_RESOURCE_DCN201_H__ +#define __DC_RESOURCE_DCN201_H__ + +#include "core_types.h" + +#define RRDPCS_PHY_DP_TX_PSTATE_POWER_UP 0x00000000 +#define RRDPCS_PHY_DP_TX_PSTATE_HOLD 0x00000001 +#define RRDPCS_PHY_DP_TX_PSTATE_HOLD_OFF 0x00000002 +#define RRDPCS_PHY_DP_TX_PSTATE_POWER_DOWN 0x00000003 + +#define TO_DCN201_RES_POOL(pool)\ + container_of(pool, struct dcn201_resource_pool, base) + +struct dc; +struct resource_pool; +struct _vcs_dpi_display_pipe_params_st; + +struct dcn201_resource_pool { + struct resource_pool base; +}; +struct resource_pool *dcn201_create_resource_pool( + const struct dc_init_data *init_data, + struct dc *dc); + +#endif /* __DC_RESOURCE_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index fbbdf9976183..d452a0d1777e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -35,7 +35,7 @@ #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" -#include "dml/dcn2x/dcn2x.h" +#include "dml/dcn20/dcn20_fpu.h" #include "clk_mgr.h" #include "dcn10/dcn10_hubp.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c index fa981cd04dd0..95528e5ef89e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c @@ -44,11 +44,14 @@ afmt3->base.ctx -static void afmt3_setup_hdmi_audio( +void afmt3_setup_hdmi_audio( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); + if (afmt->funcs->afmt_poweron) + afmt->funcs->afmt_poweron(afmt); + /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); @@ -113,7 +116,7 @@ static union audio_cea_channels speakers_to_channels( return cea_channels; } -static void afmt3_se_audio_setup( +void afmt3_se_audio_setup( struct afmt *afmt, unsigned int az_inst, struct audio_info *audio_info) @@ -138,20 +141,24 @@ static void afmt3_se_audio_setup( REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels); /* Disable forced mem power off */ - REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0); + if (afmt->funcs->afmt_poweron == NULL) + REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0); } -static void afmt3_audio_mute_control( +void afmt3_audio_mute_control( struct afmt *afmt, bool mute) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); - + if (mute && afmt->funcs->afmt_powerdown) + afmt->funcs->afmt_powerdown(afmt); + if (!mute && afmt->funcs->afmt_poweron) + afmt->funcs->afmt_poweron(afmt); /* enable/disable transmission of audio packets */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute); } -static void afmt3_audio_info_immediate_update( +void afmt3_audio_info_immediate_update( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); @@ -160,11 +167,14 @@ static void afmt3_audio_info_immediate_update( REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); } -static void afmt3_setup_dp_audio( +void afmt3_setup_dp_audio( struct afmt *afmt) { struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt); + if (afmt->funcs->afmt_poweron) + afmt->funcs->afmt_poweron(afmt); + /* AFMT_AUDIO_PACKET_CONTROL */ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h index 
85d4619207e2..97e0cf62f98e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h @@ -121,6 +121,12 @@ struct afmt_funcs { void (*setup_dp_audio)( struct afmt *afmt); + + void (*afmt_poweron)( + struct afmt *afmt); + + void (*afmt_powerdown)( + struct afmt *afmt); }; struct afmt { @@ -136,6 +142,24 @@ struct dcn30_afmt { const struct dcn30_afmt_mask *afmt_mask; }; +void afmt3_setup_hdmi_audio( + struct afmt *afmt); + +void afmt3_se_audio_setup( + struct afmt *afmt, + unsigned int az_inst, + struct audio_info *audio_info); + +void afmt3_audio_mute_control( + struct afmt *afmt, + bool mute); + +void afmt3_audio_info_immediate_update( + struct afmt *afmt); + +void afmt3_setup_dp_audio( + struct afmt *afmt); + void afmt3_construct(struct dcn30_afmt *afmt3, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 46ea39f5ef8d..6f3c2fb60790 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -192,6 +192,10 @@ void dcn30_link_encoder_construct( enc10->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE; + enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN; + enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN; + enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN; enc10->base.features.flags.bits.DP_IS_USB_C = bp_cap_info.DP_IS_USB_C; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 8487516819ef..7aa9aaf5db4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -77,7 +77,8 @@ static void enc3_update_hdmi_info_packet( enc1->base.vpg->funcs->update_generic_info_packet( enc1->base.vpg, packet_index, - info_packet); + info_packet, + true); /* enable transmission of packet(s) - * packet transmission begins on the next frame */ @@ -335,7 +336,8 @@ static void enc3_dp_set_dsc_config(struct stream_encoder *enc, static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps) + uint8_t *dsc_packed_pps, + bool immediate_update) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -365,7 +367,8 @@ static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc, enc1->base.vpg->funcs->update_generic_info_packet( enc1->base.vpg, 11 + i, - &pps_sdp); + &pps_sdp, + immediate_update); } /* SW should make sure VBID[6] update line number is bigger @@ -429,19 +432,22 @@ static void enc3_stream_encoder_update_dp_info_packets( enc->vpg->funcs->update_generic_info_packet( enc->vpg, 0, /* packetIndex */ - &info_frame->vsc); + &info_frame->vsc, + true); } if (info_frame->spd.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 2, /* packetIndex */ - &info_frame->spd); + &info_frame->spd, + true); } if (info_frame->hdrsmd.valid) { enc->vpg->funcs->update_generic_info_packet( enc->vpg, 3, /* packetIndex */ - &info_frame->hdrsmd); + &info_frame->hdrsmd, + true); } /* packetIndex 4 is used for send immediate sdp message, 
and please * use other packetIndex (such as 5,6) for other info packet @@ -799,6 +805,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = { enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 23a52d47e61c..c1d967ed6551 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -488,6 +488,54 @@ void dpp3_cnv_set_bias_scale( REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue); } +void dpp3_deferred_update( + struct dpp *dpp_base) +{ + int bypass_state; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->deferred_reg_writes.bits.disable_dscl) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + dpp_base->deferred_reg_writes.bits.disable_dscl = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_gamcor) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_gamcor = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) { + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_3dlut) { + REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_3dlut = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_shaper) { + REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_shaper = false; + } +} + static void dpp3_power_on_blnd_lut( struct dpp *dpp_base, bool power_on) @@ -495,9 +543,13 @@ static void dpp3_power_on_blnd_lut( struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, power_on ? 0 : 3); - if (power_on) + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; + } } else { REG_SET(CM_MEM_PWR_CTRL, 0, BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); @@ -511,9 +563,13 @@ static void dpp3_power_on_hdr3dlut( struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, power_on ? 
0 : 3); - if (power_on) + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_3dlut = true; + } } } @@ -524,9 +580,13 @@ static void dpp3_power_on_shaper( struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, power_on ? 0 : 3); - if (power_on) + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_shaper = true; + } } } @@ -1400,6 +1460,7 @@ static struct dpp_funcs dcn30_dpp_funcs = { .dpp_program_blnd_lut = dpp3_program_blnd_lut, .dpp_program_shaper_lut = dpp3_program_shaper, .dpp_program_3dlut = dpp3_program_3dlut, + .dpp_deferred_update = dpp3_deferred_update, .dpp_program_bias_and_scale = NULL, .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, .set_cursor_attributes = dpp3_set_cursor_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c index 72c5687adc68..387eec616162 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c @@ -136,9 +136,13 @@ static void dpp3_power_on_gamcor_lut( struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, power_on ? 0 : 3); - if (power_on) + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0); REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_gamcor = true; + } } else REG_SET(CM_MEM_PWR_CTRL, 0, GAMCOR_MEM_PWR_DIS, power_on == true ? 
0:1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index f24612523248..eac08926b574 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -356,12 +356,6 @@ void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp, { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - /*Workaround until UMD fix the new dcc_ind_blk interface */ - if (dcc->independent_64b_blks && dcc->dcc_ind_blk == 0) - dcc->dcc_ind_blk = 1; - if (dcc->independent_64b_blks_c && dcc->dcc_ind_blk_c == 0) - dcc->dcc_ind_blk_c = 1; - REG_UPDATE_6(DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, dcc->enable, PRIMARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index fafed1e4a998..3e99bb9c70ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -437,7 +437,7 @@ void dcn30_init_hw(struct dc *dc) struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; - int i, j; + int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; @@ -534,41 +534,8 @@ void dcn30_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - /* DP 2.0 states that LTTPR regs must be read first */ - dp_retrieve_lttpr_cap(dc->links[i]); - - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe; - - fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - if (fe == ENGINE_ID_UNKNOWN) - continue; - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank( - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1002,7 +969,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, /* turning off DPG */ pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false); for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) - mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); + if (mpcc_pipe->plane_res.hubp) + mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false); stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space, color_depth, solid_color, width, height, offset); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 3a5b53dd2f6d..93f32a312fee 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, .update_visual_confirm_color = dcn20_update_visual_confirm_color, + .is_abm_supported = dcn21_is_abm_supported }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index a82319f4d081..95149734378b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1381,13 +1381,11 @@ int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) } -static void mpc3_mpc_init(struct mpc *mpc) +static void mpc3_set_mpc_mem_lp_mode(struct mpc *mpc) { struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int mpcc_id; - mpc1_mpc_init(mpc); - if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { if (mpc30->mpc_mask->MPC_RMU0_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPC_RMU1_MEM_LOW_PWR_MODE) { REG_UPDATE(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, 3); @@ -1405,7 +1403,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc1_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc3_mpc_init, + .mpc_init = mpc1_mpc_init, .mpc_init_single_inst = mpc1_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, @@ -1432,6 +1430,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, .set_bg_color = mpc1_set_bg_color, + .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, }; void dcn30_mpc_construct(struct dcn30_mpc *mpc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 089be7347591..f5e8916601d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -73,16 +73,23 @@ void optc3_lock_doublebuffer_enable(struct timing_generator *optc) OTG_H_BLANK_END, &h_blank_end); REG_UPDATE_2(OTG_GLOBAL_CONTROL1, - MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start, - MASTER_UPDATE_LOCK_DB_END_Y, v_blank_end); + MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start - 1, + MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start); REG_UPDATE_2(OTG_GLOBAL_CONTROL4, - DIG_UPDATE_POSITION_X, 20, - DIG_UPDATE_POSITION_Y, v_blank_start); + DIG_UPDATE_POSITION_X, h_blank_start - 180 - 1, + DIG_UPDATE_POSITION_Y, v_blank_start - 1); + // there is a DIG_UPDATE_VCOUNT_MODE and it is 0. 
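+ // With these values the DB lock window spans X in [h_blank_start - 201, h_blank_start - 180] + // and Y in [v_blank_start - 1, v_blank_start], so the DIG update position programmed + // above (h_blank_start - 181, v_blank_start - 1) lands just inside the locked region.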
+ REG_UPDATE_3(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1, - MASTER_UPDATE_LOCK_DB_END_X, h_blank_end, + MASTER_UPDATE_LOCK_DB_END_X, h_blank_start - 180, MASTER_UPDATE_LOCK_DB_EN, 1); REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1); + + REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0, + MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100, + OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1); } void optc3_lock_doublebuffer_disable(struct timing_generator *optc) @@ -325,6 +332,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc3_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index a0de309475a9..79a66e0c4303 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -1164,8 +1164,12 @@ struct stream_encoder *dcn30_stream_encoder_create( vpg = dcn30_vpg_create(ctx, vpg_inst); afmt = dcn30_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, @@ -1703,9 +1707,6 @@ bool dcn30_release_post_bldn_3dlut( return ret; } -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) - static bool is_soc_bounding_box_valid(struct dc *dc) { uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; @@ -1856,7 +1857,7 @@ static struct pipe_ctx *dcn30_find_split_pipe( return pipe; } -static noinline bool dcn30_internal_validate_bw( +noinline bool dcn30_internal_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -1925,23 +1926,25 @@ static noinline bool dcn30_internal_validate_bw( if (vlevel == context->bw_ctx.dml.soc.num_states) goto validate_fail; - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + if (!dc->config.enable_windowed_mpo_odm) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; - if (!pipe->stream) - continue; + if (!pipe->stream) + continue; - /* We only support full screen mpo with ODM */ - if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled - && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_res.scl_data.recout, - &pipe->plane_res.scl_data.recout, - sizeof(struct rect)) != 0) { - ASSERT(mpo_pipe->plane_state != pipe->plane_state); - goto validate_fail; + /* We only support full screen mpo with ODM */ + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_res.scl_data.recout, + &pipe->plane_res.scl_data.recout, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; } - pipe_idx++; } /* merge pipes if necessary */ @@ -2125,10 +2128,10 @@ static noinline void 
dcn30_calculate_wm_and_dlg_fp( int pipe_cnt, int vlevel) { + int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; int i, pipe_idx; - double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; - bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != - dm_dram_clock_change_unsupported; + double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][maxMpcComb]; + bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) dcfclk = context->bw_ctx.dml.soc.min_dcfclk; @@ -2204,6 +2207,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp( context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; @@ -2319,7 +2323,9 @@ bool dcn30_validate_bandwidth(struct dc *dc, goto validate_out; } + DC_FP_START(); dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); BW_VAL_TRACE_END_WATERMARKS(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h index b754b89beadf..b92e4cc0232f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h @@ -55,6 +55,13 @@ unsigned int dcn30_calc_max_scaled_time( bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +bool dcn30_internal_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate); void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c index 8cfd181b4d5f..14bc44b1f886 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c @@ -43,10 +43,11 @@ vpg3->base.ctx -static void vpg3_update_generic_info_packet( +void vpg3_update_generic_info_packet( struct vpg *vpg, uint32_t packet_index, - const struct dc_info_packet *info_packet) + const struct dc_info_packet *info_packet, + bool immediate_update) { struct dcn30_vpg *vpg3 = DCN30_VPG_FROM_VPG(vpg); uint32_t i; @@ -106,69 +107,138 @@ static void vpg3_update_generic_info_packet( /* atomically update double-buffered GENERIC0 registers in immediate mode * (update at next block_update when block_update_lock == 0). 
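+ * When immediate_update is false, the write is instead latched at the next frame + * via the VPG_GSP_FRAME_UPDATE_CTRL fields programmed in the new else branch below.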
*/ - switch (packet_index) { - case 0: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC0_IMMEDIATE_UPDATE, 1); - break; - case 1: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC1_IMMEDIATE_UPDATE, 1); - break; - case 2: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC2_IMMEDIATE_UPDATE, 1); - break; - case 3: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC3_IMMEDIATE_UPDATE, 1); - break; - case 4: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC4_IMMEDIATE_UPDATE, 1); - break; - case 5: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC5_IMMEDIATE_UPDATE, 1); - break; - case 6: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC6_IMMEDIATE_UPDATE, 1); - break; - case 7: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC7_IMMEDIATE_UPDATE, 1); - break; - case 8: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC8_IMMEDIATE_UPDATE, 1); - break; - case 9: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC9_IMMEDIATE_UPDATE, 1); - break; - case 10: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC10_IMMEDIATE_UPDATE, 1); - break; - case 11: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC11_IMMEDIATE_UPDATE, 1); - break; - case 12: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC12_IMMEDIATE_UPDATE, 1); - break; - case 13: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC13_IMMEDIATE_UPDATE, 1); - break; - case 14: - REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, - VPG_GENERIC14_IMMEDIATE_UPDATE, 1); - break; - default: - break; + if (immediate_update) { + switch (packet_index) { + case 0: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC0_IMMEDIATE_UPDATE, 1); + break; + case 1: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC1_IMMEDIATE_UPDATE, 1); + break; + case 2: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC2_IMMEDIATE_UPDATE, 1); + break; + case 3: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC3_IMMEDIATE_UPDATE, 1); + break; + case 4: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC4_IMMEDIATE_UPDATE, 1); + break; + case 5: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC5_IMMEDIATE_UPDATE, 1); + break; + case 6: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC6_IMMEDIATE_UPDATE, 1); + break; + case 7: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC7_IMMEDIATE_UPDATE, 1); + break; + case 8: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC8_IMMEDIATE_UPDATE, 1); + break; + case 9: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC9_IMMEDIATE_UPDATE, 1); + break; + case 10: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC10_IMMEDIATE_UPDATE, 1); + break; + case 11: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC11_IMMEDIATE_UPDATE, 1); + break; + case 12: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC12_IMMEDIATE_UPDATE, 1); + break; + case 13: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC13_IMMEDIATE_UPDATE, 1); + break; + case 14: + REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL, + VPG_GENERIC14_IMMEDIATE_UPDATE, 1); + break; + default: + break; + } + } else { + switch (packet_index) { + case 0: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC0_FRAME_UPDATE, 1); + break; + case 1: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC1_FRAME_UPDATE, 1); + break; + case 2: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC2_FRAME_UPDATE, 1); + break; + case 3: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC3_FRAME_UPDATE, 1); + break; + 
case 4: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC4_FRAME_UPDATE, 1); + break; + case 5: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC5_FRAME_UPDATE, 1); + break; + case 6: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC6_FRAME_UPDATE, 1); + break; + case 7: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC7_FRAME_UPDATE, 1); + break; + case 8: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC8_FRAME_UPDATE, 1); + break; + case 9: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC9_FRAME_UPDATE, 1); + break; + case 10: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC10_FRAME_UPDATE, 1); + break; + case 11: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC11_FRAME_UPDATE, 1); + break; + case 12: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC12_FRAME_UPDATE, 1); + break; + case 13: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC13_FRAME_UPDATE, 1); + break; + case 14: + REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL, + VPG_GENERIC14_FRAME_UPDATE, 1); + break; + + default: + break; + } + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h index 6161e9e66355..ed9a5549c389 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h @@ -138,7 +138,14 @@ struct vpg_funcs { void (*update_generic_info_packet)( struct vpg *vpg, uint32_t packet_index, - const struct dc_info_packet *info_packet); + const struct dc_info_packet *info_packet, + bool immediate_update); + + void (*vpg_poweron)( + struct vpg *vpg); + + void (*vpg_powerdown)( + struct vpg *vpg); }; struct vpg { @@ -154,6 +161,12 @@ struct dcn30_vpg { const struct dcn30_vpg_mask *vpg_mask; }; +void vpg3_update_generic_info_packet( + struct vpg *vpg, + uint32_t packet_index, + const struct dc_info_packet *info_packet, + bool immediate_update); + void vpg3_construct(struct dcn30_vpg *vpg3, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 09264716d1dc..7aa628c21973 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -13,32 +13,6 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o -ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse -endif - -ifdef CONFIG_PPC64 -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). 
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mpreferred-stack-boundary=4 -else -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -msse2 -endif -endif - AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN301) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 912285fdce18..7abc36a4ff76 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -82,6 +82,7 @@ #include "dce/dce_i2c.h" #include "dml/dcn30/display_mode_vba_30.h" +#include "dml/dcn301/dcn301_fpu.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" #include "amdgpu_socbb.h" @@ -91,184 +92,6 @@ #define DC_LOGGER_INIT(logger) -struct _vcs_dpi_ip_params_st dcn3_01_ip = { - .odm_capable = 1, - .gpuvm_enable = 1, - .hostvm_enable = 1, - .gpuvm_max_page_table_levels = 1, - .hostvm_max_page_table_levels = 2, - .hostvm_cached_page_table_levels = 0, - .pte_group_size_bytes = 2048, - .num_dsc = 3, - .rob_buffer_size_kbytes = 184, - .det_buffer_size_kbytes = 184, - .dpte_buffer_size_in_pte_reqs_luma = 64, - .dpte_buffer_size_in_pte_reqs_chroma = 32, - .pde_proc_buffer_size_64k_reqs = 48, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 8, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, // ? - .line_buffer_fixed_bpp = 48, // ? - .dcc_supported = true, - .writeback_interface_buffer_size_kbytes = 90, - .writeback_line_buffer_buffer_size = 656640, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 1, - .writeback_max_vscl_taps = 1, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 4, - .max_num_dpp = 4, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 6, - .max_vscl_ratio = 6, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.11, - .min_vblank_lines = 32, - .dppclk_delay_subtotal = 46, - .dynamic_metadata_vm_enabled = true, - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 27, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 119, - .dcfclk_cstate_latency = 5.2, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - .max_num_hdmi_frl_outputs = 0, - .odm_combine_4to1_supported = true, - - .xfc_supported = false, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .gfx7_compat_tiling_supported = 0, - .number_of_cursors = 1, -}; - -struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = { - .clock_limits = { - { - .state = 0, - .dram_speed_mts = 2400.0, - .fabricclk_mhz = 600, - .socclk_mhz = 278.0, - .dcfclk_mhz = 400.0, - .dscclk_mhz = 206.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 600.0, - }, - { - .state = 1, - .dram_speed_mts = 2400.0, - 
.fabricclk_mhz = 688, - .socclk_mhz = 278.0, - .dcfclk_mhz = 400.0, - .dscclk_mhz = 206.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 600.0, - }, - { - .state = 2, - .dram_speed_mts = 4267.0, - .fabricclk_mhz = 1067, - .socclk_mhz = 278.0, - .dcfclk_mhz = 608.0, - .dscclk_mhz = 296.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 810.0, - }, - - { - .state = 3, - .dram_speed_mts = 4267.0, - .fabricclk_mhz = 1067, - .socclk_mhz = 715.0, - .dcfclk_mhz = 676.0, - .dscclk_mhz = 338.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 810.0, - }, - - { - .state = 4, - .dram_speed_mts = 4267.0, - .fabricclk_mhz = 1067, - .socclk_mhz = 953.0, - .dcfclk_mhz = 810.0, - .dscclk_mhz = 338.0, - .dppclk_mhz = 1015.0, - .dispclk_mhz = 1015.0, - .phyclk_mhz = 810.0, - }, - }, - - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 60.0, - .max_avg_dram_bw_use_normal_percent = 60.0, - .writeback_latency_us = 12.0, - .max_request_size_bytes = 256, - .dram_channel_width_bytes = 4, - .fabric_datapath_to_dcn_data_return_bytes = 32, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 191, - .urgent_out_of_order_return_per_channel_bytes = 4096, - .channel_interleave_bytes = 256, - .num_banks = 8, - .num_chans = 4, - .gpuvm_min_page_size_bytes = 4096, - .hostvm_min_page_size_bytes = 4096, - .dram_clock_change_latency_us = 23.84, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3550, - .xfc_bus_transport_time_us = 20, // ? - .xfc_xbuf_latency_tolerance_us = 4, // ? - .use_urgent_burst_bw = 1, // ? 
- .num_states = 5, - .do_urgent_latency_adjustment = false, - .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, -}; - enum dcn301_clk_src_array_id { DCN301_CLK_SRC_PLL0, DCN301_CLK_SRC_PLL1, @@ -863,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_clock_gate = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -1195,8 +1018,12 @@ struct stream_encoder *dcn301_stream_encoder_create( vpg = dcn301_vpg_create(ctx, vpg_inst); afmt = dcn301_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, @@ -1476,8 +1303,6 @@ static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) static bool is_soc_bounding_box_valid(struct dc *dc) { @@ -1504,26 +1329,24 @@ static bool init_soc_bounding_box(struct dc *dc, loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = {0}; if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10; + DC_FP_START(); + dcn301_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); } } return true; } + static void set_wm_ranges( struct pp_smu_funcs *pp_smu, struct _vcs_dpi_soc_bounding_box_st *loaded_bb) @@ -1546,9 +1369,9 @@ static void set_wm_ranges( ranges.reader_wm_sets[i].wm_inst = i; ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0; - ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16; - + DC_FP_START(); + dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb); + DC_FP_END(); ranges.num_reader_wm_sets = i + 1; } @@ -1568,66 +1391,12 @@ static void set_wm_ranges( pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges); } -static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) -{ - struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool); - struct clk_limit_table *clk_table = &bw_params->clk_table; - struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; - unsigned int i, closest_clk_lvl; - int j; - - // Default clock levels are used for diags, which may lead to overclocking. 
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) { - dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; - dcn3_01_ip.max_num_dpp = pool->base.pipe_count; - dcn3_01_soc.num_chans = bw_params->num_channels; - - ASSERT(clk_table->num_entries); - for (i = 0; i < clk_table->num_entries; i++) { - /* loop backwards*/ - for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { - if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { - closest_clk_lvl = j; - break; - } - } - - clock_limits[i].state = i; - clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; - clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; - clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; - - clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; - clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; - clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; - clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; - clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; - clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; - clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; - } - for (i = 0; i < clk_table->num_entries; i++) - dcn3_01_soc.clock_limits[i] = clock_limits[i]; - if (clk_table->num_entries) { - dcn3_01_soc.num_states = clk_table->num_entries; - /* duplicate last level */ - dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; - dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; - } - } - - dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - - dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); -} - static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, .panel_cntl_create = dcn301_panel_cntl_create, .validate_bandwidth = dcn30_validate_bandwidth, - .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, @@ -1680,9 +1449,7 @@ static bool dcn301_resource_construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; -#ifdef CONFIG_DRM_AMD_DC_DMUB dc->caps.dmcub_support = true; -#endif /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -1718,6 +1485,23 @@ static bool dcn301_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; 
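+ /* As with the LTTPR cap above, vbios_lttpr_aware ends up true only
+ * when the VBIOS query succeeds and reports interop enabled; any
+ * failure leaves it false.
+ */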
+ uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h index 17e4e91ff4b8..ae8672680cdd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h @@ -32,6 +32,9 @@ struct dc; struct resource_pool; struct _vcs_dpi_display_pipe_params_st; +extern struct _vcs_dpi_ip_params_st dcn3_01_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc; + struct dcn301_resource_pool { struct resource_pool base; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 7d3ff5d44402..058f5d71e037 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -542,8 +542,12 @@ static struct stream_encoder *dcn302_stream_encoder_create(enum engine_id eng_id vpg = dcn302_vpg_create(ctx, vpg_inst); afmt = dcn302_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -1462,7 +1466,7 @@ static const struct dccg_mask dccg_mask = { }; #define abm_regs(id)\ - [id] = { ABM_DCN301_REG_LIST(id) } + [id] = { ABM_DCN302_REG_LIST(id) } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), @@ -1553,6 +1557,24 @@ static bool dcn302_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, + &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index dd38796ba30a..7024aeb0884c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; } + + // WA: patch strobe modes to compensate for DCN303 BW issue + if (dcn3_03_soc.num_chans <= 4) { + for (i = 0; i < dcn3_03_soc.num_states; i++) { + if 
(dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700) + break; + + if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) { + dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100; + dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100; + } + } + } + /* re-init DML with updated bb */ dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); if (dc->current_state) @@ -1394,7 +1408,7 @@ static const struct dccg_mask dccg_mask = { }; #define abm_regs(id)\ - [id] = { ABM_DCN301_REG_LIST(id) } + [id] = { ABM_DCN302_REG_LIST(id) } static const struct dce_abm_registers abm_regs[] = { abm_regs(0), @@ -1486,6 +1500,23 @@ static bool dcn303_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 4bab97acb155..d20e3b8ccc30 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -11,7 +11,9 @@ # Makefile for dcn31. DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o \ - dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o + dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ + dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \ + dcn31_afmt.o dcn31_vpg.o ifdef CONFIG_X86 CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -msse diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c new file mode 100644 index 000000000000..d380a8ec2184 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "hw_shared.h" +#include "dcn30/dcn30_afmt.h" +#include "dcn31_afmt.h" +#include "reg_helper.h" +#include "dc/dc.h" + +#define DC_LOGGER \ + afmt31->base.ctx->logger + +#define REG(reg)\ + (afmt31->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + afmt31->afmt_shift->field_name, afmt31->afmt_mask->field_name + + +#define CTX \ + afmt31->base.ctx + +static struct afmt_funcs dcn31_afmt_funcs = { + .setup_hdmi_audio = afmt3_setup_hdmi_audio, + .se_audio_setup = afmt3_se_audio_setup, + .audio_mute_control = afmt3_audio_mute_control, + .audio_info_immediate_update = afmt3_audio_info_immediate_update, + .setup_dp_audio = afmt3_setup_dp_audio, + .afmt_powerdown = afmt31_powerdown, + .afmt_poweron = afmt31_poweron +}; + +void afmt31_powerdown(struct afmt *afmt) +{ + struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt); + + if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false) + return; + + REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 0, AFMT_MEM_PWR_FORCE, 1); +} + +void afmt31_poweron(struct afmt *afmt) +{ + struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt); + + if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false) + return; + + REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 1, AFMT_MEM_PWR_FORCE, 0); +} + +void afmt31_construct(struct dcn31_afmt *afmt31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_afmt_registers *afmt_regs, + const struct dcn31_afmt_shift *afmt_shift, + const struct dcn31_afmt_mask *afmt_mask) +{ + afmt31->base.ctx = ctx; + + afmt31->base.inst = inst; + afmt31->base.funcs = &dcn31_afmt_funcs; + + afmt31->regs = afmt_regs; + afmt31->afmt_shift = afmt_shift; + afmt31->afmt_mask = afmt_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h new file mode 100644 index 000000000000..802cb05b6ab9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h @@ -0,0 +1,126 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_AFMT_H__ +#define __DAL_DCN31_AFMT_H__ + + +#define DCN31_AFMT_FROM_AFMT(afmt)\ + container_of(afmt, struct dcn31_afmt, base) + +#define AFMT_DCN31_REG_LIST(id) \ + SRI(AFMT_INFOFRAME_CONTROL0, AFMT, id), \ + SRI(AFMT_VBI_PACKET_CONTROL, AFMT, id), \ + SRI(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \ + SRI(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \ + SRI(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \ + SRI(AFMT_60958_0, AFMT, id), \ + SRI(AFMT_60958_1, AFMT, id), \ + SRI(AFMT_60958_2, AFMT, id), \ + SRI(AFMT_MEM_PWR, AFMT, id) + +struct dcn31_afmt_registers { + uint32_t AFMT_INFOFRAME_CONTROL0; + uint32_t AFMT_VBI_PACKET_CONTROL; + uint32_t AFMT_AUDIO_PACKET_CONTROL; + uint32_t AFMT_AUDIO_PACKET_CONTROL2; + uint32_t AFMT_AUDIO_SRC_CONTROL; + uint32_t AFMT_60958_0; + uint32_t AFMT_60958_1; + uint32_t AFMT_60958_2; + uint32_t AFMT_MEM_PWR; +}; + +#define DCN31_AFMT_MASK_SH_LIST(mask_sh)\ + SE_SF(AFMT0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\ + SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\ + SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\ + SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, mask_sh),\ + SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, mask_sh),\ + SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_STATE, mask_sh) + +#define AFMT_DCN31_REG_FIELD_LIST(type) \ + type AFMT_AUDIO_INFO_UPDATE;\ + type AFMT_AUDIO_SRC_SELECT;\ + type AFMT_AUDIO_CHANNEL_ENABLE;\ + type AFMT_60958_CS_UPDATE;\ + type AFMT_AUDIO_LAYOUT_OVRD;\ + type AFMT_60958_OSF_OVRD;\ + type AFMT_60958_CS_CHANNEL_NUMBER_L;\ + type AFMT_60958_CS_CLOCK_ACCURACY;\ + type AFMT_60958_CS_CHANNEL_NUMBER_R;\ + type AFMT_60958_CS_CHANNEL_NUMBER_2;\ + type AFMT_60958_CS_CHANNEL_NUMBER_3;\ + type AFMT_60958_CS_CHANNEL_NUMBER_4;\ + type AFMT_60958_CS_CHANNEL_NUMBER_5;\ + type AFMT_60958_CS_CHANNEL_NUMBER_6;\ + type AFMT_60958_CS_CHANNEL_NUMBER_7;\ + type AFMT_AUDIO_SAMPLE_SEND;\ + type AFMT_MEM_PWR_FORCE;\ + type AFMT_MEM_PWR_DIS;\ + type AFMT_MEM_PWR_STATE + +struct dcn31_afmt_shift { + AFMT_DCN31_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_afmt_mask { + AFMT_DCN31_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_afmt { + struct afmt base; + const struct dcn31_afmt_registers *regs; + const struct dcn31_afmt_shift *afmt_shift; + const struct dcn31_afmt_mask *afmt_mask; +}; + +void afmt31_poweron( + struct afmt *afmt); + +void afmt31_powerdown( + struct afmt *afmt); + +void afmt31_construct(struct dcn31_afmt *afmt31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_afmt_registers *afmt_regs, + const struct 
dcn31_afmt_shift *afmt_shift, + const struct dcn31_afmt_mask *afmt_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c new file mode 100644 index 000000000000..de5e18c2a3ac --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c @@ -0,0 +1,173 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dc_bios_types.h" +#include "hw_shared.h" +#include "dcn31_apg.h" +#include "reg_helper.h" + +#define DC_LOGGER \ + apg31->base.ctx->logger + +#define REG(reg)\ + (apg31->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + apg31->apg_shift->field_name, apg31->apg_mask->field_name + + +#define CTX \ + apg31->base.ctx + + +static void apg31_enable( + struct apg *apg) +{ + struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); + + /* Reset APG */ + REG_UPDATE(APG_CONTROL, APG_RESET, 1); + REG_WAIT(APG_CONTROL, + APG_RESET_DONE, 1, + 1, 10); + REG_UPDATE(APG_CONTROL, APG_RESET, 0); + REG_WAIT(APG_CONTROL, + APG_RESET_DONE, 0, + 1, 10); + + /* Enable APG */ + REG_UPDATE(APG_CONTROL2, APG_ENABLE, 1); +} + +static void apg31_disable( + struct apg *apg) +{ + struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); + + /* Disable APG */ + REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0); +} + +static union audio_cea_channels speakers_to_channels( + struct audio_speaker_flags speaker_flags) +{ + union audio_cea_channels cea_channels = {0}; + + /* these are one to one */ + cea_channels.channels.FL = speaker_flags.FL_FR; + cea_channels.channels.FR = speaker_flags.FL_FR; + cea_channels.channels.LFE = speaker_flags.LFE; + cea_channels.channels.FC = speaker_flags.FC; + + /* if Rear Left and Right exist move RC speaker to channel 7 + * otherwise to channel 5 + */ + if (speaker_flags.RL_RR) { + cea_channels.channels.RL_RC = speaker_flags.RL_RR; + cea_channels.channels.RR = speaker_flags.RL_RR; + cea_channels.channels.RC_RLC_FLC = speaker_flags.RC; + } else { + cea_channels.channels.RL_RC = speaker_flags.RC; + } + + /* FRONT Left Right Center and REAR Left Right Center are exclusive */ + if (speaker_flags.FLC_FRC) { + cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC; + cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC; + } else { + cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC; + cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC; + } + + return cea_channels; +} + +static void 
apg31_se_audio_setup( + struct apg *apg, + unsigned int az_inst, + struct audio_info *audio_info) +{ + struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); + + uint32_t speakers = 0; + uint32_t channels = 0; + + ASSERT(audio_info); + /* This should not happen; if it does, bail out so we don't get a BSOD. */ + if (audio_info == NULL) + return; + + speakers = audio_info->flags.info.ALLSPEAKERS; + channels = speakers_to_channels(audio_info->flags.speaker_flags).all; + + /* DisplayPort only allows for one audio stream with stream ID 0 */ + REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0); + + /* When running in "pair mode", pairs of audio channels have their own enable; + * this is for really old audio drivers. */ + REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF); + // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels); + + /* Disable forced mem power off */ + REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0); + + apg31_enable(apg); +} + +static void apg31_audio_mute_control( + struct apg *apg, + bool mute) +{ + if (mute) + apg31_disable(apg); + else + apg31_enable(apg); +} + +static struct apg_funcs dcn31_apg_funcs = { + .se_audio_setup = apg31_se_audio_setup, + .audio_mute_control = apg31_audio_mute_control, + .enable_apg = apg31_enable, + .disable_apg = apg31_disable, +}; + +void apg31_construct(struct dcn31_apg *apg31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_apg_registers *apg_regs, + const struct dcn31_apg_shift *apg_shift, + const struct dcn31_apg_mask *apg_mask) +{ + apg31->base.ctx = ctx; + + apg31->base.inst = inst; + apg31->base.funcs = &dcn31_apg_funcs; + + apg31->regs = apg_regs; + apg31->apg_shift = apg_shift; + apg31->apg_mask = apg_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h new file mode 100644 index 000000000000..24f568e120d8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h @@ -0,0 +1,115 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_AGP_H__ +#define __DAL_DCN31_AGP_H__ + + +#define DCN31_APG_FROM_APG(apg)\ + container_of(apg, struct dcn31_apg, base) + +#define APG_DCN31_REG_LIST(id) \ + SRI(APG_CONTROL, APG, id), \ + SRI(APG_CONTROL2, APG, id),\ + SRI(APG_MEM_PWR, APG, id),\ + SRI(APG_DBG_GEN_CONTROL, APG, id) + +struct dcn31_apg_registers { + uint32_t APG_CONTROL; + uint32_t APG_CONTROL2; + uint32_t APG_MEM_PWR; + uint32_t APG_DBG_GEN_CONTROL; +}; + + +#define DCN31_APG_MASK_SH_LIST(mask_sh)\ + SE_SF(APG0_APG_CONTROL, APG_RESET, mask_sh),\ + SE_SF(APG0_APG_CONTROL, APG_RESET_DONE, mask_sh),\ + SE_SF(APG0_APG_CONTROL2, APG_ENABLE, mask_sh),\ + SE_SF(APG0_APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, mask_sh),\ + SE_SF(APG0_APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, mask_sh),\ + SE_SF(APG0_APG_MEM_PWR, APG_MEM_PWR_FORCE, mask_sh) + +#define APG_DCN31_REG_FIELD_LIST(type) \ + type APG_RESET;\ + type APG_RESET_DONE;\ + type APG_ENABLE;\ + type APG_DP_AUDIO_STREAM_ID;\ + type APG_DBG_AUDIO_CHANNEL_ENABLE;\ + type APG_MEM_PWR_FORCE + +struct dcn31_apg_shift { + APG_DCN31_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_apg_mask { + APG_DCN31_REG_FIELD_LIST(uint32_t); +}; + +struct apg { + const struct apg_funcs *funcs; + struct dc_context *ctx; + int inst; +}; + +struct apg_funcs { + + void (*setup_hdmi_audio)( + struct apg *apg); + + void (*se_audio_setup)( + struct apg *apg, + unsigned int az_inst, + struct audio_info *audio_info); + + void (*audio_mute_control)( + struct apg *apg, + bool mute); + + void (*enable_apg)( + struct apg *apg); + + void (*disable_apg)( + struct apg *apg); +}; + + + +struct dcn31_apg { + struct apg base; + const struct dcn31_apg_registers *regs; + const struct dcn31_apg_shift *apg_shift; + const struct dcn31_apg_mask *apg_mask; +}; + +void apg31_construct(struct dcn31_apg *apg3, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_apg_registers *apg_regs, + const struct dcn31_apg_shift *apg_shift, + const struct dcn31_apg_mask *apg_mask); + + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 696c9307715d..815481a3ef54 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -26,6 +26,7 @@ #include "reg_helper.h" #include "core_types.h" #include "dcn31_dccg.h" +#include "dal_asic_id.h" #define TO_DCN_DCCG(dccg)\ container_of(dccg, struct dcn_dccg, base) @@ -42,6 +43,358 @@ #define DC_LOGGER \ dccg->ctx->logger +static void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (dccg->ref_dppclk && req_dppclk) { + int ref_dppclk = dccg->ref_dppclk; + int modulo, phase; + + // phase / modulo = dpp pipe clk / dpp global clk + modulo = 0xff; // use FF at the end + phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; + + if (phase > 0xff) { + ASSERT(false); + phase = 0xff; + } + + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, phase, + DPPCLK0_DTO_MODULO, modulo); + REG_UPDATE(DPPCLK_DTO_CTRL, + DPPCLK_DTO_ENABLE[dpp_inst], 1); + } else { + //DTO must be enabled to generate a 0Hz clock output + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) { + REG_UPDATE(DPPCLK_DTO_CTRL, + DPPCLK_DTO_ENABLE[dpp_inst], 1); + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, 0, + DPPCLK0_DTO_MODULO, 1); + } else { + REG_UPDATE(DPPCLK_DTO_CTRL, + DPPCLK_DTO_ENABLE[dpp_inst], 0); + } + } + 
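+ /* Worked example of the DTO math above, with illustrative clocks:
+ * ref_dppclk = 600000 kHz and req_dppclk = 300000 kHz give
+ * modulo = 0xff and phase = (255 * 300000 + 599999) / 600000 = 128,
+ * i.e. the DTO output is about phase/modulo = 50% of the DPP refclk.
+ */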
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; +} + +static enum phyd32clk_clock_source get_phy_mux_symclk( + struct dcn_dccg *dccg_dcn, + enum phyd32clk_clock_source src) +{ + if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if (src == PHYD32CLKC) + src = PHYD32CLKF; + if (src == PHYD32CLKD) + src = PHYD32CLKG; + } + return src; +} + +static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* enabled to select one of the DTBCLKs for pipe */ + switch (otg_inst) { + case 0: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE0_EN, 1); + break; + case 1: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE1_EN, 1); + break; + case 2: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE2_EN, 1); + break; + case 3: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE3_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_ROOT_GATE_DISABLE, 1); +} + +static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_ROOT_GATE_DISABLE, 0); + + switch (otg_inst) { + case 0: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE0_EN, 0); + break; + case 1: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE1_EN, 0); + break; + case 2: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE2_EN, 0); + break; + case 3: + REG_UPDATE(DPSTREAMCLK_CNTL, + DPSTREAMCLK_PIPE3_EN, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_set_dpstreamclk( + struct dccg *dccg, + enum hdmistreamclk_source src, + int otg_inst) +{ + if (src == REFCLK) + dccg31_disable_dpstreamclk(dccg, otg_inst); + else + dccg31_enable_dpstreamclk(dccg, otg_inst); +} + +void dccg31_enable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst, + enum phyd32clk_clock_source phyd32clk) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk); + + /* select one of the PHYD32CLKs as the source for symclk32_se */ + switch (hpo_se_inst) { + case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE0_GATE_DISABLE, 1); + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, phyd32clk, + SYMCLK32_SE0_EN, 1); + break; + case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE1_GATE_DISABLE, 1); + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, phyd32clk, + SYMCLK32_SE1_EN, 1); + break; + case 2: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE2_GATE_DISABLE, 1); + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, phyd32clk, + SYMCLK32_SE2_EN, 1); + break; + case 3: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE3_GATE_DISABLE, 1); + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, phyd32clk, + SYMCLK32_SE3_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_disable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* set refclk as the source for symclk32_se */ + switch (hpo_se_inst) { + case 0: + 
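+ /* SRC_SEL 0 parks the mux on refclk; the mux is parked and the
+ * clock disabled before root gating is re-allowed, the reverse of
+ * the enable sequence above.
+ */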
REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE0_SRC_SEL, 0, + SYMCLK32_SE0_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); + break; + case 1: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE1_SRC_SEL, 0, + SYMCLK32_SE1_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); + break; + case 2: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE2_SRC_SEL, 0, + SYMCLK32_SE2_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); + break; + case 3: + REG_UPDATE_2(SYMCLK32_SE_CNTL, + SYMCLK32_SE3_SRC_SEL, 0, + SYMCLK32_SE3_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_enable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst, + enum phyd32clk_clock_source phyd32clk) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk); + + /* select one of the PHYD32CLKs as the source for symclk32_le */ + switch (hpo_le_inst) { + case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE0_SRC_SEL, phyd32clk, + SYMCLK32_LE0_EN, 1); + break; + case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE1_SRC_SEL, phyd32clk, + SYMCLK32_LE1_EN, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +void dccg31_disable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + /* set refclk as the source for symclk32_le */ + switch (hpo_le_inst) { + case 0: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE0_SRC_SEL, 0, + SYMCLK32_LE0_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); + break; + case 1: + REG_UPDATE_2(SYMCLK32_LE_CNTL, + SYMCLK32_LE1_SRC_SEL, 0, + SYMCLK32_LE1_EN, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg31_disable_dscclk(struct dccg *dccg, int inst) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + return; + //DTO must be enabled to generate a 0 Hz clock output + switch (inst) { + case 0: + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK0_DTO_ENABLE, 1); + REG_UPDATE_2(DSCCLK0_DTO_PARAM, + DSCCLK0_DTO_PHASE, 0, + DSCCLK0_DTO_MODULO, 1); + break; + case 1: + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK1_DTO_ENABLE, 1); + REG_UPDATE_2(DSCCLK1_DTO_PARAM, + DSCCLK1_DTO_PHASE, 0, + DSCCLK1_DTO_MODULO, 1); + break; + case 2: + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK2_DTO_ENABLE, 1); + REG_UPDATE_2(DSCCLK2_DTO_PARAM, + DSCCLK2_DTO_PHASE, 0, + DSCCLK2_DTO_MODULO, 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + +static void dccg31_enable_dscclk(struct dccg *dccg, int inst) +{ + struct dcn_dccg *dccg_dcn = 
TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + return; + //Disable DTO + switch (inst) { + case 0: + REG_UPDATE_2(DSCCLK0_DTO_PARAM, + DSCCLK0_DTO_PHASE, 0, + DSCCLK0_DTO_MODULO, 0); + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK0_DTO_ENABLE, 0); + break; + case 1: + REG_UPDATE_2(DSCCLK1_DTO_PARAM, + DSCCLK1_DTO_PHASE, 0, + DSCCLK1_DTO_MODULO, 0); + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK1_DTO_ENABLE, 0); + break; + case 2: + REG_UPDATE_2(DSCCLK2_DTO_PARAM, + DSCCLK2_DTO_PHASE, 0, + DSCCLK2_DTO_MODULO, 0); + REG_UPDATE(DSCCLK_DTO_CTRL, + DSCCLK2_DTO_ENABLE, 0); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, @@ -241,16 +594,44 @@ static void dccg31_set_dispclk_change_mode( void dccg31_init(struct dccg *dccg) { + /* Set HPO stream encoder to use refclk to avoid case where PHY is + * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which + * will cause DCN to hang. + */ + dccg31_disable_symclk32_se(dccg, 0); + dccg31_disable_symclk32_se(dccg, 1); + dccg31_disable_symclk32_se(dccg, 2); + dccg31_disable_symclk32_se(dccg, 3); + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) { + dccg31_disable_symclk32_le(dccg, 0); + dccg31_disable_symclk32_le(dccg, 1); + } + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + dccg31_disable_dpstreamclk(dccg, 0); + dccg31_disable_dpstreamclk(dccg, 1); + dccg31_disable_dpstreamclk(dccg, 2); + dccg31_disable_dpstreamclk(dccg, 3); + } + } static const struct dccg_funcs dccg31_funcs = { - .update_dpp_dto = dccg2_update_dpp_dto, + .update_dpp_dto = dccg31_update_dpp_dto, .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, .dccg_init = dccg31_init, + .set_dpstreamclk = dccg31_set_dpstreamclk, + .enable_symclk32_se = dccg31_enable_symclk32_se, + .disable_symclk32_se = dccg31_disable_symclk32_se, + .enable_symclk32_le = dccg31_enable_symclk32_le, + .disable_symclk32_le = dccg31_disable_symclk32_le, .set_physymclk = dccg31_set_physymclk, .set_dtbclk_dto = dccg31_set_dtbclk_dto, .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto, .set_dispclk_change_mode = dccg31_set_dispclk_change_mode, + .disable_dsc = dccg31_disable_dscclk, + .enable_dsc = dccg31_enable_dscclk, }; struct dccg *dccg31_create( diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 706ad80ba873..a013a32bbaf7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -61,7 +61,13 @@ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\ SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\ SR(DCCG_AUDIO_DTO_SOURCE),\ - SR(DENTIST_DISPCLK_CNTL) + SR(DENTIST_DISPCLK_CNTL),\ + SR(DSCCLK0_DTO_PARAM),\ + SR(DSCCLK1_DTO_PARAM),\ + SR(DSCCLK2_DTO_PARAM),\ + SR(DSCCLK_DTO_CTRL),\ + SR(DCCG_GATE_DISABLE_CNTL3),\ + SR(HDMISTREAMCLK0_DTO_PARAM) #define DCCG_MASK_SH_LIST_DCN31(mask_sh) \ @@ -119,7 +125,26 @@ DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 3, mask_sh),\ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\ - DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh) + DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh), \ + DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\ + DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\ + 
DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\ + DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\ + DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\ + DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\ + DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh) struct dccg *dccg31_create( @@ -130,6 +155,29 @@ struct dccg *dccg31_create( void dccg31_init(struct dccg *dccg); +void dccg31_set_dpstreamclk( + struct dccg *dccg, + enum hdmistreamclk_source src, + int otg_inst); + +void dccg31_enable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst, + enum phyd32clk_clock_source phyd32clk); + +void dccg31_disable_symclk32_se( + struct dccg *dccg, + int hpo_se_inst); + +void dccg31_enable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst, + enum phyd32clk_clock_source phyd32clk); + +void dccg31_disable_symclk32_le( + struct dccg *dccg, + int hpo_le_inst); + void dccg31_set_physymclk( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 90127c1f9e35..ee6f13bef377 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -37,6 +37,7 @@ #include "link_enc_cfg.h" #include "dc_dmub_srv.h" +#include "dal_asic_id.h" #define CTX \ enc10->base.ctx @@ -62,6 +63,10 @@ #define AUX_REG_WRITE(reg_name, val) \ dm_write_reg(CTX, AUX_REG(reg_name), val) +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? 
(X) : (Y)) +#endif + void dcn31_link_encoder_set_dio_phy_mux( struct link_encoder *enc, enum encoder_type_select sel, @@ -215,8 +220,8 @@ static const struct link_encoder_funcs dcn31_link_enc_funcs = { .fec_is_active = enc2_fec_is_active, .get_dig_frontend = dcn10_get_dig_frontend, .get_dig_mode = dcn10_get_dig_mode, - .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode, - .get_max_link_cap = dcn20_link_encoder_get_max_link_cap, + .is_in_alt_mode = dcn31_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn31_link_encoder_get_max_link_cap, .set_dio_phy_mux = dcn31_link_encoder_set_dio_phy_mux, }; @@ -320,6 +325,10 @@ void dcn31_link_encoder_construct( enc10->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE; + enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN; + enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN; + enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN; enc10->base.features.flags.bits.DP_IS_USB_C = bp_cap_info.DP_IS_USB_C; } else { @@ -357,19 +366,79 @@ void dcn31_link_encoder_construct_minimal( SIGNAL_TYPE_EDP; } +/* DPIA equivalent of link_transmitter_control. */ +static bool link_dpia_control(struct dc_context *dc_ctx, + struct dmub_cmd_dig_dpia_control_data *dpia_control) +{ + union dmub_rb_cmd cmd; + struct dc_dmub_srv *dmub = dc_ctx->dmub_srv; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA; + cmd.dig1_dpia_control.header.sub_type = + DMUB_CMD__DPIA_DIG1_DPIA_CONTROL; + cmd.dig1_dpia_control.header.payload_bytes = + sizeof(cmd.dig1_dpia_control) - + sizeof(cmd.dig1_dpia_control.header); + + cmd.dig1_dpia_control.dpia_control = *dpia_control; + + dc_dmub_srv_cmd_queue(dmub, &cmd); + dc_dmub_srv_cmd_execute(dmub); + dc_dmub_srv_wait_idle(dmub); + + return true; +} + +static void link_encoder_disable(struct dcn10_link_encoder *enc10) +{ + /* reset training complete */ + REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0); +} + void dcn31_link_encoder_enable_dp_output( struct link_encoder *enc, const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + /* Enable transmitter and encoder. */ - if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) { + if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source); } else { - /** @todo Handle transmitter with programmable mapping to link encoder. */ + struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; + struct dc_link *link; + + link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); + + enc1_configure_encoder(enc10, link_settings); + + dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE; + dpia_control.enc_id = enc->preferred_engine; + dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */ + dpia_control.lanenum = (uint8_t)link_settings->lane_count; + dpia_control.symclk_10khz = link_settings->link_rate * + LINK_RATE_REF_FREQ_IN_KHZ / 10; + /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin + * unused by DPIA. 
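+ * That is, the hpdsel = 6 written below encodes
+ * DIG_HPD_SELECT = 5 (hpdsel - 1).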
+ */ + dpia_control.hpdsel = 6; + + if (link) { + dpia_control.dpia_id = link->ddc_hw_inst; + dpia_control.fec_rdy = dc_link_should_enable_fec(link); + } else { + DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); + BREAK_TO_DEBUGGER(); + return; + } + + link_dpia_control(enc->ctx, &dpia_control); } } @@ -378,14 +447,43 @@ void dcn31_link_encoder_enable_dp_mst_output( const struct dc_link_settings *link_settings, enum clock_source_id clock_source) { + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + /* Enable transmitter and encoder. */ - if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) { + if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); } else { - /** @todo Handle transmitter with programmable mapping to link encoder. */ + struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; + struct dc_link *link; + + link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); + + enc1_configure_encoder(enc10, link_settings); + + dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE; + dpia_control.enc_id = enc->preferred_engine; + dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */ + dpia_control.lanenum = (uint8_t)link_settings->lane_count; + dpia_control.symclk_10khz = link_settings->link_rate * + LINK_RATE_REF_FREQ_IN_KHZ / 10; + /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin + * unused by DPIA. + */ + dpia_control.hpdsel = 6; + + if (link) { + dpia_control.dpia_id = link->ddc_hw_inst; + dpia_control.fec_rdy = dc_link_should_enable_fec(link); + } else { + DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); + BREAK_TO_DEBUGGER(); + return; + } + + link_dpia_control(enc->ctx, &dpia_control); } } @@ -393,14 +491,102 @@ void dcn31_link_encoder_disable_output( struct link_encoder *enc, enum signal_type signal) { + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + /* Disable transmitter and encoder. */ - if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) { + if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) { dcn10_link_encoder_disable_output(enc, signal); } else { - /** @todo Handle transmitter with programmable mapping to link encoder. 
*/ + struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 }; + struct dc_link *link; + + if (!dcn10_is_dig_enabled(enc)) + return; + + link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine); + + dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE; + dpia_control.enc_id = enc->preferred_engine; + if (signal == SIGNAL_TYPE_DISPLAY_PORT) { + dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */ + } else if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */ + } else { + DC_LOG_ERROR("%s: USB4 DPIA only supports DisplayPort.\n", __func__); + BREAK_TO_DEBUGGER(); + } + + if (link) { + dpia_control.dpia_id = link->ddc_hw_inst; + } else { + DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__); + BREAK_TO_DEBUGGER(); + return; + } + + link_dpia_control(enc->ctx, &dpia_control); + + link_encoder_disable(enc10); } } +bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t dp_alt_mode_disable; + bool is_usb_c_alt_mode = false; + + if (enc->features.flags.bits.DP_IS_USB_C) { + if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { + // [Note] no need to check hw_internal_rev once phy mux selection is ready + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + } else { + /* + * B0 phys use a new set of registers to check whether alt mode is disabled. + * if value == 1 alt mode is disabled, otherwise it is enabled. + */ + if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) + || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) + || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + } else { + // [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready + REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + } + } + + is_usb_c_alt_mode = (dp_alt_mode_disable == 0); + } + + return is_usb_c_alt_mode; +} + +void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t is_in_usb_c_dp4_mode = 0; + + dcn10_link_encoder_get_max_link_cap(enc, link_settings); + + /* in usb c dp2 mode, max lane count is 2 */ + if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { + if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { + // [Note] no need to check hw_internal_rev once phy mux selection is ready + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); + } else { + if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) + || (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) + || (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); + } else { + REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); + } + } + if (!is_in_usb_c_dp4_mode) + link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h index 32d146312838..3454f1e7c1f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h @@ -69,6 +69,7 @@ SRI(RDPCSTX_PHY_CNTL4, RDPCSTX, id), \ 
SRI(RDPCSTX_PHY_CNTL5, RDPCSTX, id), \ SRI(RDPCSTX_PHY_CNTL6, RDPCSTX, id), \ + SRI(RDPCSPIPE_PHY_CNTL6, RDPCSPIPE, id), \ SRI(RDPCSTX_PHY_CNTL7, RDPCSTX, id), \ SRI(RDPCSTX_PHY_CNTL8, RDPCSTX, id), \ SRI(RDPCSTX_PHY_CNTL9, RDPCSTX, id), \ @@ -115,7 +116,9 @@ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_MPLL_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_MPLL_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ - LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + LE_SF(RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_QUOT, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL7, RDPCS_PHY_DP_MPLLB_FRACN_DEN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL8, RDPCS_PHY_DP_MPLLB_SSC_PEAK, mask_sh),\ @@ -243,4 +246,13 @@ void dcn31_link_encoder_disable_output( struct link_encoder *enc, enum signal_type signal); +/* + * Check whether USB-C DP Alt mode is disabled + */ +bool dcn31_link_encoder_is_in_alt_mode( + struct link_encoder *enc); + +void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings); + #endif /* __DC_LINK_ENCODER__DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c new file mode 100644 index 000000000000..6c08e21bb708 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -0,0 +1,616 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn31_hpo_dp_link_encoder.h" +#include "reg_helper.h" +#include "dc_link.h" +#include "stream_encoder.h" + +#define DC_LOGGER \ + enc3->base.ctx->logger + +#define REG(reg)\ + (enc3->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name + + +#define CTX \ + enc3->base.ctx + +enum { + DP_SAT_UPDATE_MAX_RETRY = 200 +}; + +void dcn31_hpo_dp_link_enc_enable( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t dp_link_enabled; + + /* get current status of link enabled */ + REG_GET(DP_DPHY_SYM32_STATUS, + STATUS, &dp_link_enabled); + + /* Enable clocks first */ + REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1); + + /* Reset DPHY. Only reset if going from disable to enable */ + if (!dp_link_enabled) { + REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0); + } + + /* Configure DPHY settings */ + REG_UPDATE_3(DP_DPHY_SYM32_CONTROL, + DPHY_ENABLE, 1, + PRECODER_ENABLE, 1, + NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 1 : 3); +} + +void dcn31_hpo_dp_link_enc_disable( + struct hpo_dp_link_encoder *enc) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + + /* Configure DPHY settings */ + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + DPHY_ENABLE, 0); + + /* Shut down clock last */ + REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0); +} + +void dcn31_hpo_dp_link_enc_set_link_test_pattern( + struct hpo_dp_link_encoder *enc, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t tp_custom; + + switch (tp_params->dp_phy_pattern) { + case DP_TEST_PATTERN_VIDEO_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_ACTIVE); + break; + case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_TRAINING_TPS1); + break; + case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE: + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_LINK_TRAINING_TPS2); + break; + case DP_TEST_PATTERN_128b_132b_TPS1: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT1, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT2, DP_DPHY_TP_SELECT_TPS1, + TP_SELECT3, DP_DPHY_TP_SELECT_TPS1); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_128b_132b_TPS2: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT1, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT2, DP_DPHY_TP_SELECT_TPS2, + TP_SELECT3, DP_DPHY_TP_SELECT_TPS2); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS7: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS7, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS7); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS9: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS9, + TP_PRBS_SEL2, 
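The NUM_LANES ternary in dcn31_hpo_dp_link_enc_enable() above is simply lane_count - 1 for the lane counts DP allows (1, 2 and 4 map to field values 0, 1 and 3). A sketch of the same encoding, assuming only valid lane counts are passed:

#include <stdint.h>

/* Field encoding used for DP_DPHY_SYM32_CONTROL.NUM_LANES above:
 * 1 lane -> 0, 2 lanes -> 1, 4 lanes -> 3, i.e. lane_count - 1.
 */
static uint32_t dphy_num_lanes_field(uint32_t lane_count)
{
	return lane_count - 1;
}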
DP_DPHY_TP_PRBS9, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS9); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS11: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS11, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS11); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS15: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS15, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS15); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS23: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS23, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS23); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_PRBS31: + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_PRBS_SEL0, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL1, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL2, DP_DPHY_TP_PRBS31, + TP_PRBS_SEL3, DP_DPHY_TP_PRBS31); + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT1, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT2, DP_DPHY_TP_SELECT_PRBS, + TP_SELECT3, DP_DPHY_TP_SELECT_PRBS); + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_264BIT_CUSTOM: + tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] << 8) | tp_params->custom_pattern[12]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, 
tp_custom); + tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom); + tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30]; + REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom); + + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM, + TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM); + + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + case DP_TEST_PATTERN_SQUARE_PULSE: + REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0, + TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]); + + REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG, + TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE, + TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE); + + REG_UPDATE(DP_DPHY_SYM32_CONTROL, + MODE, DP2_TEST_PATTERN); + break; + default: + break; + } +} + +static void fill_stream_allocation_row_info( + const struct link_mst_stream_allocation *stream_allocation, + uint32_t *src, + uint32_t *slots) +{ + const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc; + + if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) { + *src = stream_enc->id - ENGINE_ID_HPO_DP_0; + *slots = stream_allocation->slot_count; + } else { + *src = 0; + *slots = 0; + } +} + +/* programs DP VC payload allocation */ +void dcn31_hpo_dp_link_enc_update_stream_allocation_table( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t slots = 0; + uint32_t src = 0; + + /* --- Set MSE Stream Attribute - + * Setup VC Payload Table on Tx Side, + * Issue allocation change trigger + * to commit payload on both tx and rx side + */ + + /* we should clean-up table each time */ + + if (table->stream_count >= 1) { + fill_stream_allocation_row_info( + &table->stream_allocations[0], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 2) { + fill_stream_allocation_row_info( + &table->stream_allocations[1], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 3) { + fill_stream_allocation_row_info( + &table->stream_allocations[2], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + if (table->stream_count >= 4) { + fill_stream_allocation_row_info( + &table->stream_allocations[3], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3, + SAT_STREAM_SOURCE, src, + SAT_SLOT_COUNT, slots); + + /* --- wait for transaction finish */ + + /* send allocation change trigger (ACT) + * this step 
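The DP_TEST_PATTERN_264BIT_CUSTOM case above packs a 264-bit (33-byte) custom pattern into eleven 24-bit TP_CUSTOM register writes, three bytes per write with the lowest-indexed byte in the least significant bits. The unrolled REG_SET calls are equivalent to this hypothetical loop:

#include <stdint.h>

/* Pack 33 pattern bytes into eleven 24-bit words, matching the
 * unrolled writes to DP_DPHY_SYM32_TP_CUSTOM0..10 above:
 * words[i] = bytes[3i+2] << 16 | bytes[3i+1] << 8 | bytes[3i].
 */
static void pack_tp_custom_words(const uint8_t bytes[33], uint32_t words[11])
{
	int i;

	for (i = 0; i < 11; i++)
		words[i] = ((uint32_t)bytes[3 * i + 2] << 16) |
			   ((uint32_t)bytes[3 * i + 1] << 8) |
			   bytes[3 * i];
}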
first sends the ACT, + * then double buffers the SAT into the hardware + * making the new allocation active on the DP MST mode link + */ + + /* SAT_UPDATE: + * 0 - No Action + * 1 - Update SAT with trigger + * 2 - Update SAT without trigger + */ + REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE, + SAT_UPDATE, 1); + + /* wait for update to complete + * (i.e. SAT_UPDATE_PENDING field is set to 0) + * No need for HW to enforce keepout. + */ + /* Best case and worst case wait time for SAT_UPDATE_PENDING + * best: 109 us + * worst: 868 us + */ + REG_WAIT(DP_DPHY_SYM32_STATUS, + SAT_UPDATE_PENDING, 0, + 10, DP_SAT_UPDATE_MAX_RETRY); +} + +void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t x = dc_fixpt_floor( + avg_time_slots_per_mtp); + uint32_t y = dc_fixpt_ceil( + dc_fixpt_shl( + dc_fixpt_sub_int( + avg_time_slots_per_mtp, + x), + 25)); + + switch (stream_encoder_inst) { + case 0: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 1: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 2: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + case 3: + REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0, + STREAM_VC_RATE_X, x, + STREAM_VC_RATE_Y, y); + break; + default: + ASSERT(0); + } + + /* Best case and worst case wait time for RATE_UPDATE_PENDING + * best: 116 ns + * worst: 903 ns + */ + /* wait for update to be completed on the link */ + REG_WAIT(DP_DPHY_SYM32_STATUS, + RATE_UPDATE_PENDING, 0, + 1, 10); +} + +static bool dcn31_hpo_dp_link_enc_is_in_alt_mode( + struct hpo_dp_link_encoder *enc) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + uint32_t dp_alt_mode_disable = 0; + + ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E)); + + /* if value == 1 alt mode is disabled, otherwise it is enabled */ + REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); + return (dp_alt_mode_disable == 0); +} + +void dcn31_hpo_dp_link_enc_read_state( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state) +{ + struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc); + + ASSERT(state); + + REG_GET(DP_DPHY_SYM32_STATUS, + STATUS, &state->link_enc_enabled); + REG_GET(DP_DPHY_SYM32_CONTROL, + NUM_LANES, &state->lane_count); + REG_GET(DP_DPHY_SYM32_CONTROL, + MODE, (uint32_t *)&state->link_mode); + + REG_GET_2(DP_DPHY_SYM32_SAT_VC0, + SAT_STREAM_SOURCE, &state->stream_src[0], + SAT_SLOT_COUNT, &state->slot_count[0]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC1, + SAT_STREAM_SOURCE, &state->stream_src[1], + SAT_SLOT_COUNT, &state->slot_count[1]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC2, + SAT_STREAM_SOURCE, &state->stream_src[2], + SAT_SLOT_COUNT, &state->slot_count[2]); + REG_GET_2(DP_DPHY_SYM32_SAT_VC3, + SAT_STREAM_SOURCE, &state->stream_src[3], + SAT_SLOT_COUNT, &state->slot_count[3]); + + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, + STREAM_VC_RATE_X, &state->vc_rate_x[0], + STREAM_VC_RATE_Y, &state->vc_rate_y[0]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, + STREAM_VC_RATE_X, &state->vc_rate_x[1], + STREAM_VC_RATE_Y, &state->vc_rate_y[1]); + REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, + STREAM_VC_RATE_X, 
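dcn31_hpo_dp_link_enc_set_throttled_vcp_size() above splits the average number of time slots per MTP into the STREAM_VC_RATE_X integer part and a STREAM_VC_RATE_Y fraction scaled by 2^25 and rounded up; 3.5 slots, for example, becomes X = 3 and Y = 16777216. The driver does this with its dc_fixpt_* fixed-point helpers; the following floating-point version is for illustration only:

#include <math.h>
#include <stdint.h>

/* Illustrative only: compute the X (integer) and Y (2^25-scaled
 * fraction, rounded up) fields programmed above.
 */
static void vc_rate_fields(double avg_slots_per_mtp, uint32_t *x, uint32_t *y)
{
	*x = (uint32_t)floor(avg_slots_per_mtp);
	*y = (uint32_t)ceil((avg_slots_per_mtp - *x) * (double)(1 << 25));
}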
+			&state->vc_rate_x[2],
+			STREAM_VC_RATE_Y, &state->vc_rate_y[2]);
+	REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3,
+			STREAM_VC_RATE_X, &state->vc_rate_x[3],
+			STREAM_VC_RATE_Y, &state->vc_rate_y[3]);
+}
+
+static enum bp_result link_transmitter_control(
+	struct dcn31_hpo_dp_link_encoder *enc3,
+	struct bp_transmitter_control *cntl)
+{
+	enum bp_result result;
+	struct dc_bios *bp = enc3->base.ctx->dc_bios;
+
+	result = bp->funcs->transmitter_control(bp, cntl);
+
+	return result;
+}
+
+/* enables DP PHY output for 128b132b encoding */
+void dcn31_hpo_dp_link_enc_enable_dp_output(
+	struct hpo_dp_link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	enum transmitter transmitter)
+{
+	struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* Set the transmitter */
+	enc3->base.transmitter = transmitter;
+
+	/* Enable the PHY */
+	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+	cntl.engine_id = ENGINE_ID_UNKNOWN;
+	cntl.transmitter = enc3->base.transmitter;
+	//cntl.pll_id = clock_source;
+	cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+	cntl.lanes_number = link_settings->lane_count;
+	cntl.hpd_sel = enc3->base.hpd_source;
+	cntl.pixel_clock = link_settings->link_rate * 1000;
+	cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+	cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0;
+
+	result = link_transmitter_control(enc3, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+	}
+}
+
+void dcn31_hpo_dp_link_enc_disable_output(
+	struct hpo_dp_link_encoder *enc,
+	enum signal_type signal)
+{
+	struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* disable transmitter */
+	cntl.action = TRANSMITTER_CONTROL_DISABLE;
+	cntl.transmitter = enc3->base.transmitter;
+	cntl.hpd_sel = enc3->base.hpd_source;
+	cntl.signal = signal;
+
+	result = link_transmitter_control(enc3, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	/* disable encoder */
+	dcn31_hpo_dp_link_enc_disable(enc);
+}
+
+void dcn31_hpo_dp_link_enc_set_ffe(
+	struct hpo_dp_link_encoder *enc,
+	const struct dc_link_settings *link_settings,
+	uint8_t ffe_preset)
+{
+	struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+	struct bp_transmitter_control cntl = { 0 };
+	enum bp_result result;
+
+	/* set FFE preset on the transmitter */
+	cntl.transmitter = enc3->base.transmitter;
+	cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+	cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+	cntl.lanes_number = link_settings->lane_count;
+	cntl.pixel_clock = link_settings->link_rate * 1000;
+	cntl.lane_settings = ffe_preset;
+
+	result = link_transmitter_control(enc3, &cntl);
+
+	if (result != BP_RESULT_OK) {
+		DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+			__func__);
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+}
+
+static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = {
+	.enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output,
+	.disable_link_phy = dcn31_hpo_dp_link_enc_disable_output,
+	.link_enable = dcn31_hpo_dp_link_enc_enable,
+	.link_disable = dcn31_hpo_dp_link_enc_disable,
+	.set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern,
+	.update_stream_allocation_table =
dcn31_hpo_dp_link_enc_update_stream_allocation_table, + .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size, + .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode, + .read_state = dcn31_hpo_dp_link_enc_read_state, + .set_ffe = dcn31_hpo_dp_link_enc_set_ffe, +}; + +void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask) +{ + enc31->base.ctx = ctx; + + enc31->base.inst = inst; + enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs; + enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN; + enc31->base.transmitter = TRANSMITTER_UNKNOWN; + + enc31->regs = hpo_le_regs; + enc31->hpo_le_shift = hpo_le_shift; + enc31->hpo_le_mask = hpo_le_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h new file mode 100644 index 000000000000..0706ccaf6fec --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h @@ -0,0 +1,222 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ +#define __DAL_DCN31_HPO_DP_LINK_ENCODER_H__ + +#include "link_encoder.h" + + +#define DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(hpo_dp_link_encoder)\ + container_of(hpo_dp_link_encoder, struct dcn31_hpo_dp_link_encoder, base) + + +#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id) \ + SRI(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \ + SRI(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \ + SRI(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id) + +#define DCN3_1_RDPCSTX_REG_LIST(id) \ + SRII(RDPCSTX_PHY_CNTL6, RDPCSTX, id) + + +#define DCN3_1_HPO_DP_LINK_ENC_REGS \ + uint32_t DP_LINK_ENC_CLOCK_CONTROL;\ + uint32_t DP_DPHY_SYM32_CONTROL;\ + uint32_t DP_DPHY_SYM32_STATUS;\ + uint32_t DP_DPHY_SYM32_TP_CONFIG;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED0;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED1;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED2;\ + uint32_t DP_DPHY_SYM32_TP_PRBS_SEED3;\ + uint32_t DP_DPHY_SYM32_TP_SQ_PULSE;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM0;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM1;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM2;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM3;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM4;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM5;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM6;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM7;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM8;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM9;\ + uint32_t DP_DPHY_SYM32_TP_CUSTOM10;\ + uint32_t DP_DPHY_SYM32_SAT_VC0;\ + uint32_t DP_DPHY_SYM32_SAT_VC1;\ + uint32_t DP_DPHY_SYM32_SAT_VC2;\ + uint32_t DP_DPHY_SYM32_SAT_VC3;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL0;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL1;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL2;\ + uint32_t DP_DPHY_SYM32_VC_RATE_CNTL3;\ + uint32_t DP_DPHY_SYM32_SAT_UPDATE + +struct dcn31_hpo_dp_link_encoder_registers { + DCN3_1_HPO_DP_LINK_ENC_REGS; + uint32_t RDPCSTX_PHY_CNTL6[5]; +}; + +#define DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(mask_sh)\ + SE_SF(DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_RESET, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, mask_sh),\ + 
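The SRI/SE_SF lists here feed DC's generic shift-and-mask register accessors: each field gets a shift stored in a uint8_t struct member and a mask stored in a uint32_t one (the *_shift and *_mask structs below), and helpers like REG_GET and REG_UPDATE combine them. The underlying read-modify-write amounts to the following sketch (names hypothetical; the real macros also perform the MMIO access):

#include <stdint.h>

/* Extract one bit field from a register value. */
static uint32_t reg_field_get(uint32_t reg_val, uint8_t shift, uint32_t mask)
{
	return (reg_val & mask) >> shift;
}

/* Replace one bit field in a register value. */
static uint32_t reg_field_set(uint32_t reg_val, uint8_t shift, uint32_t mask,
			      uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}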
SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, PRECODER_ENABLE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, MODE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, NUM_LANES, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, STATUS, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0, TP_CUSTOM, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT1, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT2, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT3, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\ + SE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\ + SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh) + +#define DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) \ + type DP_LINK_ENC_CLOCK_EN;\ + type DPHY_RESET;\ + type DPHY_ENABLE;\ + type PRECODER_ENABLE;\ + type NUM_LANES;\ + type MODE;\ + type STATUS;\ + type SAT_UPDATE_PENDING;\ + type RATE_UPDATE_PENDING;\ + type TP_CUSTOM;\ + type TP_SELECT0;\ + type TP_SELECT1;\ + type TP_SELECT2;\ + type TP_SELECT3;\ + type TP_PRBS_SEL0;\ + type TP_PRBS_SEL1;\ + type TP_PRBS_SEL2;\ + type TP_PRBS_SEL3;\ + type TP_SQ_PULSE_WIDTH;\ + type SAT_STREAM_SOURCE;\ + type SAT_SLOT_COUNT;\ + type STREAM_VC_RATE_X;\ + type STREAM_VC_RATE_Y;\ + type SAT_UPDATE;\ + type RDPCS_PHY_DPALT_DISABLE + + +struct dcn31_hpo_dp_link_encoder_shift { + DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_hpo_dp_link_encoder_mask { + DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_hpo_dp_link_encoder { + struct hpo_dp_link_encoder base; + const struct dcn31_hpo_dp_link_encoder_registers *regs; + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift; + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask; +}; + +void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs, + const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift, + const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask); + +void dcn31_hpo_dp_link_enc_enable_dp_output( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + enum transmitter transmitter); + +void dcn31_hpo_dp_link_enc_disable_output( + struct hpo_dp_link_encoder *enc, + enum signal_type signal); + +void dcn31_hpo_dp_link_enc_enable( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes); + +void dcn31_hpo_dp_link_enc_disable( + struct hpo_dp_link_encoder *enc); + +void dcn31_hpo_dp_link_enc_set_link_test_pattern( + struct hpo_dp_link_encoder *enc, + struct 
encoder_set_dp_phy_pattern_param *tp_params); + +void dcn31_hpo_dp_link_enc_update_stream_allocation_table( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table); + +void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp); + +void dcn31_hpo_dp_link_enc_read_state( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state); + +void dcn31_hpo_dp_link_enc_set_ffe( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + uint8_t ffe_preset); + +#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c new file mode 100644 index 000000000000..5065904c7833 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -0,0 +1,752 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn31_hpo_dp_stream_encoder.h" +#include "reg_helper.h" +#include "dc_link.h" + +#define DC_LOGGER \ + enc3->base.ctx->logger + +#define REG(reg)\ + (enc3->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name + +#define CTX \ + enc3->base.ctx + + +enum dp2_pixel_encoding { + DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444, + DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422, + DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420, + DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY +}; + +enum dp2_uncompressed_component_depth { + DP_SYM32_ENC_COMPONENT_DEPTH_6BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_8BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_10BPC, + DP_SYM32_ENC_COMPONENT_DEPTH_12BPC +}; + + +static void dcn31_hpo_dp_stream_enc_enable_stream( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Enable all clocks in the DP_STREAM_ENC */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, + DP_STREAM_ENC_CLOCK_EN, 1); + + /* Assert reset to the DP_SYM32_ENC logic */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET, 1); + /* Wait for reset to complete (to assert) */ + REG_WAIT(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET_DONE, 1, + 1, 10); + + /* De-assert reset to the DP_SYM32_ENC logic */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET, 0); + /* Wait for reset to de-assert */ + REG_WAIT(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_RESET_DONE, 0, + 1, 10); + + /* Enable idle pattern generation */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_dp_unblank( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_source) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Set the input mux for video stream source */ + REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL, + DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source); + + /* Enable video transmission in main framer */ + REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, + VID_STREAM_ENABLE, 1); + + /* Reset and Enable Pixel to Symbol FIFO */ + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET, 1); + REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1, + 1, 10); + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_RESET, 0); + REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL, /* Disable Clock Ramp Adjuster FIFO */ + PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0, + 1, 10); + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_ENABLE, 1); + + /* Reset and Enable Clock Ramp Adjuster FIFO */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET, 1); + REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET_DONE, 1, + 1, 10); + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET, 0); + REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_RESET_DONE, 0, + 1, 10); + + /* For Debug -- Enable CRC */ + REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL, + CRC_ENABLE, 1, + CRC_CONT_MODE_ENABLE, 1); + + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_dp_blank( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable video transmission */ + 
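The enable and unblank sequences above repeat one hardware handshake: assert a RESET bit, poll the matching RESET_DONE status, de-assert, poll again, and only then set the ENABLE bit. REG_WAIT(reg, field, value, delay_us, retries) implements the bounded poll; a generic sketch of that step, with placeholders standing in for the real register I/O and delay:

#include <stdbool.h>
#include <stdint.h>

/* Placeholders for the MMIO access and delay that REG_WAIT wraps. */
extern uint32_t mmio_read(uint32_t reg);
extern void delay_us(unsigned int usecs);

/* Poll one register field until it reads 'expected', giving up after
 * 'retries' attempts spaced 'interval_us' apart.
 */
static bool poll_field(uint32_t reg, uint32_t mask, uint8_t shift,
		       uint32_t expected, unsigned int interval_us,
		       unsigned int retries)
{
	unsigned int i;

	for (i = 0; i < retries; i++) {
		if (((mmio_read(reg) & mask) >> shift) == expected)
			return true;
		delay_us(interval_us);
	}
	return false;
}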
REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL, + VID_STREAM_ENABLE, 0); + + /* Wait for video stream transmission disabled + * Larger delay to wait until VBLANK - use max retry of + * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode + + * a little more because we may not trust delay accuracy. + */ + //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL, + // VID_STREAM_STATUS, 0, + // 10, 5000); + + /* Disable SDP tranmission */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); + + /* Disable Pixel to Symbol FIFO */ + REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL, + PIXEL_TO_SYMBOL_FIFO_ENABLE, 0); + + /* Disable Clock Ramp Adjuster FIFO */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, + FIFO_ENABLE, 0); +} + +static void dcn31_hpo_dp_stream_enc_disable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable DP_SYM32_ENC */ + REG_UPDATE(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, 0); + + /* Disable clocks in the DP_STREAM_ENC */ + REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL, + DP_STREAM_ENC_CLOCK_EN, 0); +} + +static void dcn31_hpo_dp_stream_enc_set_stream_attribute( + struct hpo_dp_stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, + bool compressed_format, + bool double_buffer_en) +{ + enum dp2_pixel_encoding pixel_encoding; + enum dp2_uncompressed_component_depth component_depth; + uint32_t h_active_start; + uint32_t v_active_start; + uint32_t h_blank; + uint32_t h_back_porch; + uint32_t h_width; + uint32_t v_height; + unsigned long long v_freq; + uint8_t misc0 = 0; + uint8_t misc1 = 0; + uint8_t hsp; + uint8_t vsp; + + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + struct dc_crtc_timing hw_crtc_timing = *crtc_timing; + + /* MISC0[0] = 0 video and link clocks are asynchronous + * MISC1[0] = 0 interlace not supported + * MISC1[2:1] = 0 stereo field is handled by hardware + * MISC1[5:3] = 0 Reserved + */ + + /* Interlaced not supported */ + if (hw_crtc_timing.flags.INTERLACE) { + BREAK_TO_DEBUGGER(); + } + + /* Double buffer enable for MSA and pixel format registers + * Only double buffer for changing stream attributes for active streams + * Do not double buffer when initially enabling a stream + */ + REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, + MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en); + REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, + PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en); + + /* Pixel Encoding */ + switch (hw_crtc_timing.pixel_encoding) { + case PIXEL_ENCODING_YCBCR422: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422; + misc0 = misc0 | 0x2; // MISC0[2:1] = 01 + break; + case PIXEL_ENCODING_YCBCR444: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; + misc0 = misc0 | 0x4; // MISC0[2:1] = 10 + + if (hw_crtc_timing.flags.Y_ONLY) { + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY; + if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) { + /* HW testing only, no use case yet. 
+ * Color depth of Y-only could be + * 8, 10, 12, 16 bits + */ + misc1 = misc1 | 0x80; // MISC1[7] = 1 + } + } + break; + case PIXEL_ENCODING_YCBCR420: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420; + misc1 = misc1 | 0x40; // MISC1[6] = 1 + break; + case PIXEL_ENCODING_RGB: + default: + pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444; + break; + } + + /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used. + * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the + * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, + * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). + */ + if (use_vsc_sdp_for_colorimetry) + misc1 = misc1 | 0x40; + else + misc1 = misc1 & ~0x40; + + /* Color depth */ + switch (hw_crtc_timing.display_color_depth) { + case COLOR_DEPTH_666: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; + // MISC0[7:5] = 000 + break; + case COLOR_DEPTH_888: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC; + misc0 = misc0 | 0x20; // MISC0[7:5] = 001 + break; + case COLOR_DEPTH_101010: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC; + misc0 = misc0 | 0x40; // MISC0[7:5] = 010 + break; + case COLOR_DEPTH_121212: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC; + misc0 = misc0 | 0x60; // MISC0[7:5] = 011 + break; + default: + component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC; + break; + } + + REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, + PIXEL_ENCODING_TYPE, compressed_format, + UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding, + UNCOMPRESSED_COMPONENT_DEPTH, component_depth); + + switch (output_color_space) { + case COLOR_SPACE_SRGB: + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + break; + case COLOR_SPACE_SRGB_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: + misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_XR_RGB: + case COLOR_SPACE_MSREF_SCRGB: + case COLOR_SPACE_ADOBERGB: + case COLOR_SPACE_DCIP3: + case COLOR_SPACE_XV_YCC_709: + case COLOR_SPACE_XV_YCC_601: + case COLOR_SPACE_DISPLAYNATIVE: + case COLOR_SPACE_DOLBYVISION: + case COLOR_SPACE_APPCTRL: + case COLOR_SPACE_CUSTOMPOINTS: + case COLOR_SPACE_UNKNOWN: + case COLOR_SPACE_YCBCR709_BLACK: + /* do nothing */ + break; + } + + /* calculate from vesa timing parameters + * h_active_start related to leading edge of sync + */ + h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - + hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; + + h_back_porch = h_blank - hw_crtc_timing.h_front_porch - + hw_crtc_timing.h_sync_width; + + /* start at beginning of left border */ + h_active_start = hw_crtc_timing.h_sync_width + 
h_back_porch; + + v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - + hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - + hw_crtc_timing.v_front_porch; + + h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right; + v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; + hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80; + vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80; + v_freq = hw_crtc_timing.pix_clk_100hz * 100; + + /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 + * + * Lane 0 Lane 1 Lane 2 Lane 3 + * MSA[0] = { 0, 0, 0, VFREQ[47:40]} + * MSA[1] = { 0, 0, 0, VFREQ[39:32]} + * MSA[2] = { 0, 0, 0, VFREQ[31:24]} + * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]} + * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]} + * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]} + * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]} + * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]} + * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0} + */ + REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, v_freq >> 40); + + REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, (v_freq >> 32) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0, + MSA_DATA_LANE_0, 0, + MSA_DATA_LANE_1, 0, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, (v_freq >> 24) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8, + MSA_DATA_LANE_1, h_active_start >> 8, + MSA_DATA_LANE_2, h_width >> 8, + MSA_DATA_LANE_3, (v_freq >> 16) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff, + MSA_DATA_LANE_1, h_active_start & 0xff, + MSA_DATA_LANE_2, h_width & 0xff, + MSA_DATA_LANE_3, (v_freq >> 8) & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0, + MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8, + MSA_DATA_LANE_1, v_active_start >> 8, + MSA_DATA_LANE_2, v_height >> 8, + MSA_DATA_LANE_3, v_freq & 0xff); + + REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0, + MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff, + MSA_DATA_LANE_1, v_active_start & 0xff, + MSA_DATA_LANE_2, v_height & 0xff, + MSA_DATA_LANE_3, misc0); + + REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0, + MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8), + MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8), + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, misc1); + + REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0, + MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff, + MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff, + MSA_DATA_LANE_2, 0, + MSA_DATA_LANE_3, 0); +} + +static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( + struct hpo_dp_stream_encoder *enc, + const struct encoder_info_frame *info_frame) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t dmdata_packet_enabled = 0; + bool sdp_stream_enable = false; + + if (info_frame->vsc.valid) { + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 0, /* packetIndex */ + &info_frame->vsc, + true); + sdp_stream_enable = true; + } + if (info_frame->spd.valid) { + enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 2, /* packetIndex */ + &info_frame->spd, + true); + sdp_stream_enable = true; + } + if (info_frame->hdrsmd.valid) { + 
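Each DP_SYM32_ENC_VID_MSA register in the block above carries one byte per lane, lane 0 in bits 7:0 through lane 3 in bits 31:24 (the MSA_DATA_LANE_* shifts defined in the header file below). MSA4, for instance, packs HTotal[7:0], HStart[7:0], HWidth[7:0] and VFREQ[15:8] into a single write. A sketch of the packing:

#include <stdint.h>

/* Combine the four per-lane MSA bytes into one 32-bit register value,
 * lane 0 in the least significant byte.
 */
static uint32_t msa_word(uint8_t lane0, uint8_t lane1, uint8_t lane2,
			 uint8_t lane3)
{
	return ((uint32_t)lane3 << 24) | ((uint32_t)lane2 << 16) |
	       ((uint32_t)lane1 << 8) | lane0;
}

As a worked example, for a CEA 1080p timing (h_total 2200, h_active_start 44 + 148 = 192, h_width 1920), the MSA4 value would be msa_word(2200 & 0xff, 192 & 0xff, 1920 & 0xff, (v_freq >> 8) & 0xff).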
enc->vpg->funcs->update_generic_info_packet( + enc->vpg, + 3, /* packetIndex */ + &info_frame->hdrsmd, + true); + sdp_stream_enable = true; + } + /* enable/disable transmission of packet(s). + * If enabled, packet transmission begins on the next frame + */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid); + + /* check if dynamic metadata packet transmission is enabled */ + REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, + METADATA_PACKET_ENABLE, &dmdata_packet_enabled); + + /* Enable secondary data path */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); +} + +static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets( + struct hpo_dp_stream_encoder *enc) +{ + /* stop generic packets on DP */ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t asp_enable = 0; + uint32_t atp_enable = 0; + uint32_t aip_enable = 0; + uint32_t acm_enable = 0; + + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0); + + /* Disable secondary data path if audio is also disabled */ + REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ASP_ENABLE, &asp_enable, + ATP_ENABLE, &atp_enable, + AIP_ENABLE, &aip_enable, + ACM_ENABLE, &acm_enable); + if (!(asp_enable || atp_enable || aip_enable || acm_enable)) + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); +} + +static uint32_t hpo_dp_is_gsp_enabled( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + uint32_t gsp0_enabled = 0; + uint32_t gsp2_enabled = 0; + uint32_t gsp3_enabled = 0; + uint32_t gsp11_enabled = 0; + + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled); + REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled); + + return (gsp0_enabled || gsp2_enabled || gsp3_enabled || gsp11_enabled); +} + +static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet( + struct hpo_dp_stream_encoder *enc, + bool enable, + uint8_t *dsc_packed_pps, + bool immediate_update) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + if (enable) { + struct dc_info_packet pps_sdp; + int i; + + /* Configure for PPS packet size (128 bytes) */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_PAYLOAD_SIZE, 3); + + /* Load PPS into infoframe (SDP) registers */ + pps_sdp.valid = true; + pps_sdp.hb0 = 0; + pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS; + pps_sdp.hb2 = 127; + pps_sdp.hb3 = 0; + + for (i = 0; i < 4; i++) { + memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32); + enc3->base.vpg->funcs->update_generic_info_packet( + enc3->base.vpg, + 11 + i, + &pps_sdp, + immediate_update); + } + + /* SW should make sure VBID[6] update line number is bigger + * than PPS transmit line number + */ + 
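A packed DSC PPS is 128 bytes, while each generic SDP payload holds 32, so the loop above sends the PPS as four consecutive generic packets (indices 11 through 14) that share one header (HB1 = the PPS infoframe type, HB2 = 127, the payload byte count minus one). A sketch of the split, with send_generic_sdp() as a placeholder for the VPG update call:

#include <stdint.h>
#include <string.h>

/* Placeholder for vpg->funcs->update_generic_info_packet() above. */
extern void send_generic_sdp(int packet_index, const uint8_t payload[32]);

/* Send a 128-byte packed PPS as four 32-byte generic SDPs, matching
 * the loop above (packet indices 11..14).
 */
static void send_pps_sdp(const uint8_t pps[128])
{
	uint8_t chunk[32];
	int i;

	for (i = 0; i < 4; i++) {
		memcpy(chunk, &pps[i * 32], 32);
		send_generic_sdp(11 + i, chunk);
	}
}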
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_TRANSMISSION_LINE_NUMBER, 2); + + REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL, + VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0, + VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3); + + /* Send PPS data at the line number specified above. */ + REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1); + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); + } else { + /* Disable Generic Stream Packet 11 (GSP) transmission */ + REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11, + GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0, + GSP_PAYLOAD_SIZE, 0); + } +} + +static void dcn31_hpo_dp_stream_enc_map_stream_to_link( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_enc_inst, + uint32_t link_enc_inst) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + ASSERT(stream_enc_inst < 4 && link_enc_inst < 2); + + switch (stream_enc_inst) { + case 0: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL0, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 1: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL1, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 2: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL2, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + case 3: + REG_UPDATE(DP_STREAM_MAPPER_CONTROL3, + DP_STREAM_LINK_TARGET, link_enc_inst); + break; + } +} + +static void dcn31_hpo_dp_stream_enc_mute_control( + struct hpo_dp_stream_encoder *enc, + bool mute) +{ + ASSERT(enc->apg); + enc->apg->funcs->audio_mute_control(enc->apg, mute); +} + +static void dcn31_hpo_dp_stream_enc_audio_setup( + struct hpo_dp_stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Set the input mux for video stream source */ + REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL, + DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst); + + ASSERT(enc->apg); + enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info); +} + +static void dcn31_hpo_dp_stream_enc_audio_enable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Enable Audio packets */ + REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1); + + /* Program the ATP and AIP next */ + REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ATP_ENABLE, 1, + AIP_ENABLE, 1); + + /* Enable secondary data path */ + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 1); + + /* Enable APG block */ + enc->apg->funcs->enable_apg(enc->apg); +} + +static void dcn31_hpo_dp_stream_enc_audio_disable( + struct hpo_dp_stream_encoder *enc) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + /* Disable Audio packets */ + REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, + ASP_ENABLE, 0, + ATP_ENABLE, 0, + AIP_ENABLE, 0, + ACM_ENABLE, 0); + + /* Disable STP Stream Enable if other SDP GSP are also disabled */ + if (!(hpo_dp_is_gsp_enabled(enc))) + REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, 0); + + /* Disable APG block */ + enc->apg->funcs->disable_apg(enc->apg); +} + +static void dcn31_hpo_dp_stream_enc_read_state( + struct hpo_dp_stream_encoder *enc, + struct hpo_dp_stream_encoder_state *s) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + REG_GET(DP_SYM32_ENC_CONTROL, + DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled); + REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL, 
+ VID_STREAM_ENABLE, &s->vid_stream_enabled); + REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL, + DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst); + + REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT, + PIXEL_ENCODING_TYPE, &s->compressed_format, + UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding, + UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth); + + REG_GET(DP_SYM32_ENC_SDP_CONTROL, + SDP_STREAM_ENABLE, &s->sdp_enabled); + + switch (enc->inst) { + case 0: + REG_GET(DP_STREAM_MAPPER_CONTROL0, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 1: + REG_GET(DP_STREAM_MAPPER_CONTROL1, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 2: + REG_GET(DP_STREAM_MAPPER_CONTROL2, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + case 3: + REG_GET(DP_STREAM_MAPPER_CONTROL3, + DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc); + break; + } +} + +static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { + .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream, + .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank, + .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank, + .disable = dcn31_hpo_dp_stream_enc_disable, + .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute, + .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets, + .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets, + .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet, + .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link, + .audio_mute_control = dcn31_hpo_dp_stream_enc_mute_control, + .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup, + .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable, + .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, + .read_state = dcn31_hpo_dp_stream_enc_read_state, +}; + +void dcn31_hpo_dp_stream_encoder_construct( + struct dcn31_hpo_dp_stream_encoder *enc3, + struct dc_context *ctx, + struct dc_bios *bp, + uint32_t inst, + enum engine_id eng_id, + struct vpg *vpg, + struct apg *apg, + const struct dcn31_hpo_dp_stream_encoder_registers *regs, + const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask) +{ + enc3->base.funcs = &dcn30_str_enc_funcs; + enc3->base.ctx = ctx; + enc3->base.inst = inst; + enc3->base.id = eng_id; + enc3->base.bp = bp; + enc3->base.vpg = vpg; + enc3->base.apg = apg; + enc3->regs = regs; + enc3->hpo_se_shift = hpo_se_shift; + enc3->hpo_se_mask = hpo_se_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h new file mode 100644 index 000000000000..70b94fc25304 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h @@ -0,0 +1,241 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ +#define __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__ + +#include "dcn30/dcn30_vpg.h" +#include "dcn31/dcn31_apg.h" +#include "stream_encoder.h" + + +#define DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(hpo_dp_stream_encoder)\ + container_of(hpo_dp_stream_encoder, struct dcn31_hpo_dp_stream_encoder, base) + + +/* Define MSA_DATA_LANE_[0-3] fields to make programming easier */ +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT 0x0 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT 0x8 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT 0x10 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT 0x18 +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0_MASK 0x000000FFL +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1_MASK 0x0000FF00L +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2_MASK 0x00FF0000L +#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3_MASK 0xFF000000L + + +#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id) \ + SR(DP_STREAM_MAPPER_CONTROL0),\ + SR(DP_STREAM_MAPPER_CONTROL1),\ + SR(DP_STREAM_MAPPER_CONTROL2),\ + SR(DP_STREAM_MAPPER_CONTROL3),\ + SRI(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id),\ + SRI(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id),\ + SRI(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\ + SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id) + +#define DCN3_1_HPO_DP_STREAM_ENC_REGS \ + uint32_t DP_STREAM_MAPPER_CONTROL0;\ + uint32_t DP_STREAM_MAPPER_CONTROL1;\ + uint32_t DP_STREAM_MAPPER_CONTROL2;\ + uint32_t DP_STREAM_MAPPER_CONTROL3;\ + uint32_t 
DP_STREAM_ENC_CLOCK_CONTROL;\ + uint32_t DP_STREAM_ENC_INPUT_MUX_CONTROL;\ + uint32_t DP_STREAM_ENC_AUDIO_CONTROL;\ + uint32_t DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0;\ + uint32_t DP_SYM32_ENC_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT;\ + uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_MSA0;\ + uint32_t DP_SYM32_ENC_VID_MSA1;\ + uint32_t DP_SYM32_ENC_VID_MSA2;\ + uint32_t DP_SYM32_ENC_VID_MSA3;\ + uint32_t DP_SYM32_ENC_VID_MSA4;\ + uint32_t DP_SYM32_ENC_VID_MSA5;\ + uint32_t DP_SYM32_ENC_VID_MSA6;\ + uint32_t DP_SYM32_ENC_VID_MSA7;\ + uint32_t DP_SYM32_ENC_VID_MSA8;\ + uint32_t DP_SYM32_ENC_VID_MSA_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_FIFO_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_STREAM_CONTROL;\ + uint32_t DP_SYM32_ENC_VID_VBID_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL0;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL2;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL3;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL5;\ + uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\ + uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\ + uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\ + uint32_t DP_SYM32_ENC_VID_CRC_CONTROL + + +#define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\ + SE_SF(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, mask_sh),\ + SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_ENCODING, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, MSA_DOUBLE_BUFFER_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_0, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_1, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_2, mask_sh),\ + SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_3, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, mask_sh),\ + 
SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_PAYLOAD_SIZE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AUDIO_MUTE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\ + SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh) + + +#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \ + type DP_STREAM_LINK_TARGET;\ + type DP_STREAM_ENC_CLOCK_EN;\ + type DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL;\ + type DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL;\ + type FIFO_RESET;\ + type FIFO_RESET_DONE;\ + type FIFO_ENABLE;\ + type DP_SYM32_ENC_RESET;\ + type DP_SYM32_ENC_RESET_DONE;\ + type DP_SYM32_ENC_ENABLE;\ + type PIXEL_ENCODING_TYPE;\ + type UNCOMPRESSED_PIXEL_ENCODING;\ + type UNCOMPRESSED_COMPONENT_DEPTH;\ + type PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE;\ + type MSA_DOUBLE_BUFFER_ENABLE;\ + type MSA_DATA_LANE_0;\ + type MSA_DATA_LANE_1;\ + type MSA_DATA_LANE_2;\ + type MSA_DATA_LANE_3;\ + type PIXEL_TO_SYMBOL_FIFO_RESET;\ + type PIXEL_TO_SYMBOL_FIFO_RESET_DONE;\ + type PIXEL_TO_SYMBOL_FIFO_ENABLE;\ + type VID_STREAM_ENABLE;\ + type VID_STREAM_STATUS;\ + type VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE;\ + type VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER;\ + type SDP_STREAM_ENABLE;\ + type AUDIO_MUTE;\ + type ASP_ENABLE;\ + type ATP_ENABLE;\ + type AIP_ENABLE;\ + type ACM_ENABLE;\ + type GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE;\ + type GSP_PAYLOAD_SIZE;\ + type GSP_TRANSMISSION_LINE_NUMBER;\ + type GSP_SOF_REFERENCE;\ + type METADATA_PACKET_ENABLE;\ + type CRC_ENABLE;\ + type CRC_CONT_MODE_ENABLE + + +struct dcn31_hpo_dp_stream_encoder_registers { + DCN3_1_HPO_DP_STREAM_ENC_REGS; +}; + +struct dcn31_hpo_dp_stream_encoder_shift { + DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_hpo_dp_stream_encoder_mask { + DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_hpo_dp_stream_encoder { + struct hpo_dp_stream_encoder base; + const struct dcn31_hpo_dp_stream_encoder_registers *regs; + const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift; + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask; +}; + + +void dcn31_hpo_dp_stream_encoder_construct( + struct dcn31_hpo_dp_stream_encoder *enc3, + struct dc_context *ctx, + struct dc_bios *bp, + uint32_t inst, + enum engine_id eng_id, + struct vpg *vpg, + struct apg *apg, + const struct dcn31_hpo_dp_stream_encoder_registers *regs, + const struct 
dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift, + const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask); + + +#endif // __DAL_DCN31_HPO_STREAM_ENCODER_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 3f2333ec67e2..4206ce5bf9a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -48,6 +48,9 @@ #include "dc_link_dp.h" #include "inc/link_dpcd.h" #include "dcn10/dcn10_hw_sequencer.h" +#include "inc/link_enc_cfg.h" +#include "dcn30/dcn30_vpg.h" +#include "dce/dce_i2c_hw.h" #define DC_LOGGER_INIT(logger) @@ -63,6 +66,45 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +static void enable_memory_low_power(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + int i; + + if (dc->debug.enable_mem_low_power.bits.dmcu) { + // Force ERAM to shutdown if DMCU is not enabled + if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { + REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); + } + } + + // Set default OPTC memory power states + if (dc->debug.enable_mem_low_power.bits.optc) { + // Shutdown when unassigned and light sleep in VBLANK + REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); + } + + if (dc->debug.enable_mem_low_power.bits.vga) { + // Power down VGA memory + REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1); + } + + if (dc->debug.enable_mem_low_power.bits.mpc) + dc->res_pool->mpc->funcs->set_mpc_mem_lp_mode(dc->res_pool->mpc); + + + if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) { + // Power down VPGs + for (i = 0; i < dc->res_pool->stream_enc_count; i++) + dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg); +#if defined(CONFIG_DRM_AMD_DC_DCN) + for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++) + dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg); +#endif + } + +} + void dcn31_init_hw(struct dc *dc) { struct abm **abms = dc->res_pool->multiple_abms; @@ -70,16 +112,11 @@ void dcn31_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; - int i, j; - int edp_num; + int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); - // Initialize the dccg - if (res_pool->dccg->funcs->dccg_init) - res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { REG_WRITE(REFCLK_CNTL, 0); @@ -106,24 +143,11 @@ void dcn31_init_hw(struct dc *dc) hws->funcs.bios_golden_init(dc); hws->funcs.disable_vga(dc->hwseq); } + // Initialize the dccg + if (res_pool->dccg->funcs->dccg_init) + res_pool->dccg->funcs->dccg_init(res_pool->dccg); - if (dc->debug.enable_mem_low_power.bits.dmcu) { - // Force ERAM to shutdown if DMCU is not enabled - if (dc->debug.disable_dmcu || dc->config.disable_dmcu) { - REG_UPDATE(DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, 3); - } - } - - // Set default OPTC memory power states - if (dc->debug.enable_mem_low_power.bits.optc) { - // Shutdown when unassigned and light sleep in VBLANK - REG_SET_2(ODM_MEM_PWR_CTRL3, 0, ODM_MEM_UNASSIGNED_PWR_MODE, 3, ODM_MEM_VBLANK_PWR_MODE, 1); - } - - if (dc->debug.enable_mem_low_power.bits.vga) { - // Power down VGA memory - REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, 
VGA_MEM_PWR_FORCE, 1); - } + enable_memory_low_power(dc); if (dc->ctx->dc_bios->fw_info_valid) { res_pool->ref_clocks.xtalin_clock_inKhz = @@ -157,6 +181,9 @@ void dcn31_init_hw(struct dc *dc) */ struct dc_link *link = dc->links[i]; + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + continue; + link->link_enc->funcs->hw_init(link->link_enc); /* Check for enabled DIG to identify enabled display */ @@ -165,45 +192,13 @@ void dcn31_init_hw(struct dc *dc) link->link_status.link_active = true; } - /* Power gate DSCs */ - for (i = 0; i < res_pool->res_cap->num_dsc; i++) - if (hws->funcs.dsc_pg_control != NULL) - hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); + /* Enables outbox notifications for usb4 dpia */ + if (dc->res_pool->usb4_dpia_count) + dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe; - - fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - if (fe == ENGINE_ID_UNKNOWN) - continue; - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank( - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -218,47 +213,6 @@ void dcn31_init_hw(struct dc *dc) !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter); } - /* In headless boot cases, DIG may be turned - * on which causes HW/SW discrepancies. 
- * To avoid this, power down hardware on boot - * if DIG is turned on and seamless boot not enabled - */ - if (dc->config.power_down_display_on_boot) { - struct dc_link *edp_links[MAX_NUM_EDP]; - struct dc_link *edp_link; - bool power_down = false; - - get_edp_links(dc, edp_links, &edp_num); - if (edp_num) { - for (i = 0; i < edp_num; i++) { - edp_link = edp_links[i]; - if (edp_link->link_enc->funcs->is_dig_enabled && - edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && - dc->hwss.edp_backlight_control && - dc->hwss.power_down && - dc->hwss.edp_power_control) { - dc->hwss.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); - dc->hwss.edp_power_control(edp_link, false); - power_down = true; - } - } - } - if (!power_down) { - for (i = 0; i < dc->link_count; i++) { - struct dc_link *link = dc->links[i]; - - if (link->link_enc->funcs->is_dig_enabled && - link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); - break; - } - - } - } - } - for (i = 0; i < res_pool->audio_count; i++) { struct audio *audio = res_pool->audios[i]; @@ -280,6 +234,13 @@ void dcn31_init_hw(struct dc *dc) /* power AFMT HDMI memory TODO: may move to dis/en output save power*/ REG_WRITE(DIO_MEM_PWR_CTRL, 0); + // Set i2c to light sleep until engine is setup + if (dc->debug.enable_mem_low_power.bits.i2c) + REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1); + + if (hws->funcs.setup_hpo_hw_control) + hws->funcs.setup_hpo_hw_control(hws, false); + if (!dc->debug.disable_clock_gate) { /* enable all DCN clock gating */ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); @@ -303,8 +264,10 @@ void dcn31_init_hw(struct dc *dc) if (dc->res_pool->hubbub->funcs->force_pstate_change_control) dc->res_pool->hubbub->funcs->force_pstate_change_control( dc->res_pool->hubbub, false, false); +#if defined(CONFIG_DRM_AMD_DC_DCN) if (dc->res_pool->hubbub->funcs->init_crb) dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); +#endif } void dcn31_dsc_pg_control( @@ -319,6 +282,12 @@ void dcn31_dsc_pg_control( if (hws->ctx->dc->debug.disable_dsc_power_gate) return; + if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc && + hws->ctx->dc->res_pool->dccg->funcs->enable_dsc && + power_on) + hws->ctx->dc->res_pool->dccg->funcs->enable_dsc( + hws->ctx->dc->res_pool->dccg, dsc_inst); + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); @@ -355,6 +324,13 @@ void dcn31_dsc_pg_control( if (org_ip_request_cntl == 0) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); + + if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) { + if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on) + hws->ctx->dc->res_pool->dccg->funcs->disable_dsc( + hws->ctx->dc->res_pool->dccg, dsc_inst); + } + } @@ -420,7 +396,7 @@ void dcn31_z10_save_init(struct dc *dc) dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); } -void dcn31_z10_restore(struct dc *dc) +void dcn31_z10_restore(const struct dc *dc) { union dmub_rb_cmd cmd; @@ -589,24 +565,18 @@ void dcn31_reset_hw_ctx_wrap( dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } } + + /* New dc_state in the process of being applied to hardware. 
*/ + dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT; } -bool dcn31_is_abm_supported(struct dc *dc, - struct dc_state *context, struct dc_stream_state *stream) +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable) { - int i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == stream && - (pipe_ctx->prev_odm_pipe == NULL && pipe_ctx->next_odm_pipe == NULL)) - return true; - } - return false; + if (hws->ctx->dc->debug.hpo_optimization) + REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h index 140435e4f7ff..edfc01d6ad73 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h @@ -43,7 +43,7 @@ void dcn31_enable_power_gating_plane( void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx); -void dcn31_z10_restore(struct dc *dc); +void dcn31_z10_restore(const struct dc *dc); void dcn31_z10_save_init(struct dc *dc); void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); @@ -54,5 +54,6 @@ void dcn31_reset_hw_ctx_wrap( bool dcn31_is_abm_supported(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); void dcn31_init_pipes(struct dc *dc, struct dc_state *context); +void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable); #endif /* __DC_HWSS_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 40011cd3c8ef..e175b6cc0125 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn31_init_hw, + .power_down_on_boot = dcn10_power_down_on_boot, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, @@ -93,12 +94,12 @@ static const struct hw_sequencer_funcs dcn31_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .calc_vupdate_position = dcn10_calc_vupdate_position, + .power_down = dce110_power_down, .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, .z10_restore = dcn31_z10_restore, .z10_save_init = dcn31_z10_save_init, - .is_abm_supported = dcn31_is_abm_supported, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; @@ -136,6 +137,7 @@ static const struct hwseq_private_funcs dcn31_private_funcs = { .dccg_init = dcn20_dccg_init, .set_blend_lut = dcn30_set_blend_lut, .set_shaper_3dlut = dcn20_set_shaper_3dlut, + .setup_hpo_hw_control = dcn31_setup_hpo_hw_control, }; void dcn31_hw_sequencer_construct(struct dc *dc) @@ -147,4 +149,9 @@ void dcn31_hw_sequencer_construct(struct dc *dc) dc->hwss.init_hw = dcn20_fpga_init_hw; dc->hwseq->funcs.init_pipes = NULL; } + if (dc->debug.disable_z10) { + /* HW does not support z10, or SW has disabled it */ + dc->hwss.z10_restore = NULL; + dc->hwss.z10_save_init = NULL; + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c index
a4b1d98f0007..e8562fa11366 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c @@ -256,6 +256,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc31_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index a7702d3c75cd..88e040687940 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -52,7 +52,12 @@ #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_stream_encoder.h" +#include "dcn31/dcn31_hpo_dp_link_encoder.h" +#include "dcn31/dcn31_apg.h" #include "dcn31/dcn31_dio_link_encoder.h" +#include "dcn31/dcn31_vpg.h" +#include "dcn31/dcn31_afmt.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" #include "dce/dce_hwseq.h" @@ -96,8 +101,6 @@ #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) -#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16))) -#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x)) #define DCN3_1_DEFAULT_DET_SIZE 384 @@ -217,8 +220,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = { .num_states = 5, .sr_exit_time_us = 9.0, .sr_enter_plus_exit_time_us = 11.0, - .sr_exit_z8_time_us = 402.0, - .sr_enter_plus_exit_z8_time_us = 520.0, + .sr_exit_z8_time_us = 442.0, + .sr_enter_plus_exit_z8_time_us = 560.0, .writeback_latency_us = 12.0, .dram_channel_width_bytes = 4, .round_trip_ping_latency_dcfclk_cycles = 106, @@ -363,7 +366,7 @@ static const struct dce110_clk_src_mask cs_mask = { #define abm_regs(id)\ [id] = {\ - ABM_DCN301_REG_LIST(id)\ + ABM_DCN302_REG_LIST(id)\ } static const struct dce_abm_registers abm_regs[] = { @@ -411,10 +414,10 @@ static const struct dce_audio_mask audio_mask = { #define vpg_regs(id)\ [id] = {\ - VPG_DCN3_REG_LIST(id)\ + VPG_DCN31_REG_LIST(id)\ } -static const struct dcn30_vpg_registers vpg_regs[] = { +static const struct dcn31_vpg_registers vpg_regs[] = { vpg_regs(0), vpg_regs(1), vpg_regs(2), @@ -427,20 +430,20 @@ static const struct dcn30_vpg_registers vpg_regs[] = { vpg_regs(9), }; -static const struct dcn30_vpg_shift vpg_shift = { - DCN3_VPG_MASK_SH_LIST(__SHIFT) +static const struct dcn31_vpg_shift vpg_shift = { + DCN31_VPG_MASK_SH_LIST(__SHIFT) }; -static const struct dcn30_vpg_mask vpg_mask = { - DCN3_VPG_MASK_SH_LIST(_MASK) +static const struct dcn31_vpg_mask vpg_mask = { + DCN31_VPG_MASK_SH_LIST(_MASK) }; #define afmt_regs(id)\ [id] = {\ - AFMT_DCN3_REG_LIST(id)\ + AFMT_DCN31_REG_LIST(id)\ } -static const struct dcn30_afmt_registers afmt_regs[] = { +static const struct dcn31_afmt_registers afmt_regs[] = { afmt_regs(0), afmt_regs(1), afmt_regs(2), @@ -449,12 +452,32 @@ static const struct dcn30_afmt_registers afmt_regs[] = { afmt_regs(5) }; -static const struct dcn30_afmt_shift afmt_shift = { - DCN3_AFMT_MASK_SH_LIST(__SHIFT) +static const struct dcn31_afmt_shift afmt_shift = { + DCN31_AFMT_MASK_SH_LIST(__SHIFT) }; -static const struct dcn30_afmt_mask afmt_mask = { - DCN3_AFMT_MASK_SH_LIST(_MASK) +static const struct dcn31_afmt_mask afmt_mask = { + DCN31_AFMT_MASK_SH_LIST(_MASK) +}; + +#define apg_regs(id)\ +[id] = {\ + APG_DCN31_REG_LIST(id)\ +} + 
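The apg_regs/apg_shift/apg_mask triple being added here follows the same field-list convention as the VPG and HPO encoder blocks elsewhere in this patch: one X-macro style field list is expanded twice, once with uint8_t to produce the shift table and once with uint32_t to produce the mask table, so the two can never drift apart and the REG_GET/REG_UPDATE helpers can look both up by field name. A minimal self-contained sketch of that convention, using illustrative names rather than real driver symbols:

	#include <stdint.h>

	/* One field list, expanded for both table types. */
	#define EXAMPLE_REG_FIELD_LIST(type) \
		type FIELD_A;\
		type FIELD_B

	struct example_shift {
		EXAMPLE_REG_FIELD_LIST(uint8_t);
	};

	struct example_mask {
		EXAMPLE_REG_FIELD_LIST(uint32_t);
	};

	/* Extracting a field then needs only its name; the shift and
	 * mask come from the per-block tables, in the spirit of the
	 * REG_* helpers used throughout this patch. */
	static inline uint32_t example_get_field(uint32_t reg_val,
			const struct example_shift *s,
			const struct example_mask *m)
	{
		return (reg_val & m->FIELD_A) >> s->FIELD_A;
	}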
+static const struct dcn31_apg_registers apg_regs[] = { + apg_regs(0), + apg_regs(1), + apg_regs(2), + apg_regs(3) +}; + +static const struct dcn31_apg_shift apg_shift = { + DCN31_APG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_apg_mask apg_mask = { + DCN31_APG_MASK_SH_LIST(_MASK) }; #define stream_enc_regs(id)\ @@ -538,6 +561,49 @@ static const struct dcn10_link_enc_mask le_mask = { DPCS_DCN31_MASK_SH_LIST(_MASK) }; +#define hpo_dp_stream_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\ +} + +static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = { + hpo_dp_stream_encoder_reg_list(0), + hpo_dp_stream_encoder_reg_list(1), + hpo_dp_stream_encoder_reg_list(2), + hpo_dp_stream_encoder_reg_list(3), +}; + +static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = { + DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK) +}; + +#define hpo_dp_link_encoder_reg_list(id)\ +[id] = {\ + DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\ + DCN3_1_RDPCSTX_REG_LIST(0),\ + DCN3_1_RDPCSTX_REG_LIST(1),\ + DCN3_1_RDPCSTX_REG_LIST(2),\ + DCN3_1_RDPCSTX_REG_LIST(3),\ + DCN3_1_RDPCSTX_REG_LIST(4)\ +} + +static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = { + hpo_dp_link_encoder_reg_list(0), + hpo_dp_link_encoder_reg_list(1), +}; + +static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = { + DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK) +}; + #define dpp_regs(id)\ [id] = {\ DPP_REG_LIST_DCN30(id),\ @@ -794,7 +860,8 @@ static const struct dccg_mask dccg_mask = { SR(D6VGA_CONTROL), \ SR(DC_IP_REQUEST_CNTL), \ SR(AZALIA_AUDIO_DTO), \ - SR(AZALIA_CONTROLLER_CLOCK_GATING) + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_HW_CONTROL) static const struct dce_hwseq_registers hwseq_reg = { HWSEQ_DCN31_REG_LIST() @@ -831,7 +898,9 @@ static const struct dce_hwseq_registers hwseq_reg = { HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \ HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \ - HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh) + HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \ + HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh), \ + HWS_SF(, HPO_TOP_HW_CONTROL, HPO_IO_EN, mask_sh) static const struct dce_hwseq_shift hwseq_shift = { HWSEQ_DCN31_MASK_SH_LIST(__SHIFT) @@ -879,6 +948,8 @@ static const struct resource_caps res_cap_dcn31 = { .num_audio = 5, .num_stream_encoder = 5, .num_dig_link_enc = 5, + .num_hpo_dp_stream_encoder = 4, + .num_hpo_dp_link_encoder = 2, .num_pll = 5, .num_dwb = 1, .num_ddc = 5, @@ -928,7 +999,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 7680,/*upto 8K*/ + .max_downscale_src_width = 4096,/*upto true 4K*/ .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = false, @@ -939,13 +1010,15 @@ static const struct dc_debug_options debug_defaults_drv = { .use_max_lb = true, .enable_mem_low_power = { .bits = { - .vga = false, - .i2c = false, + .vga = true, + .i2c = true, .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled - .dscl = false, - .cm = 
false, - .mpc = false, - .optc = false, + .dscl = true, + .cm = true, + .mpc = true, + .optc = true, + .vpg = true, + .afmt = true, } }, .optimize_edp_link_rate = true, @@ -1230,34 +1303,53 @@ static struct vpg *dcn31_vpg_create( struct dc_context *ctx, uint32_t inst) { - struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL); - if (!vpg3) + if (!vpg31) return NULL; - vpg3_construct(vpg3, ctx, inst, + vpg31_construct(vpg31, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); - return &vpg3->base; + return &vpg31->base; } static struct afmt *dcn31_afmt_create( struct dc_context *ctx, uint32_t inst) { - struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL); - if (!afmt3) + if (!afmt31) return NULL; - afmt3_construct(afmt3, ctx, inst, + afmt31_construct(afmt31, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); - return &afmt3->base; + // Light sleep by default, no need to power down here + + return &afmt31->base; +} + +static struct apg *dcn31_apg_create( + struct dc_context *ctx, + uint32_t inst) +{ + struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL); + + if (!apg31) + return NULL; + + apg31_construct(apg31, ctx, inst, + &apg_regs[inst], + &apg_shift, + &apg_mask); + + return &apg31->base; } static struct stream_encoder *dcn31_stream_encoder_create( @@ -1281,8 +1373,18 @@ static struct stream_encoder *dcn31_stream_encoder_create( vpg = dcn31_vpg_create(ctx, vpg_inst); afmt = dcn31_afmt_create(ctx, afmt_inst); - if (!enc1 || !vpg || !afmt) + if (!enc1 || !vpg || !afmt) { + kfree(enc1); + kfree(vpg); + kfree(afmt); return NULL; + } + + if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && + ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { + if ((eng_id == ENGINE_ID_DIGC) || (eng_id == ENGINE_ID_DIGD)) + eng_id = eng_id + 3; // For B0 only. C->F, D->G. 
+ } dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, @@ -1292,6 +1394,72 @@ static struct stream_encoder *dcn31_stream_encoder_create( return &enc1->base; } +static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create( + enum engine_id eng_id, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31; + struct vpg *vpg; + struct apg *apg; + uint32_t hpo_dp_inst; + uint32_t vpg_inst; + uint32_t apg_inst; + + ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3)); + hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0; + + /* Mapping of VPG register blocks to HPO DP block instance: + * VPG[6] -> HPO_DP[0] + * VPG[7] -> HPO_DP[1] + * VPG[8] -> HPO_DP[2] + * VPG[9] -> HPO_DP[3] + */ + vpg_inst = hpo_dp_inst + 6; + + /* Mapping of APG register blocks to HPO DP block instance: + * APG[0] -> HPO_DP[0] + * APG[1] -> HPO_DP[1] + * APG[2] -> HPO_DP[2] + * APG[3] -> HPO_DP[3] + */ + apg_inst = hpo_dp_inst; + + /* allocate HPO stream encoder and create VPG sub-block */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL); + vpg = dcn31_vpg_create(ctx, vpg_inst); + apg = dcn31_apg_create(ctx, apg_inst); + + if (!hpo_dp_enc31 || !vpg || !apg) { + kfree(hpo_dp_enc31); + kfree(vpg); + kfree(apg); + return NULL; + } + + dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios, + hpo_dp_inst, eng_id, vpg, apg, + &hpo_dp_stream_enc_regs[hpo_dp_inst], + &hpo_dp_se_shift, &hpo_dp_se_mask); + + return &hpo_dp_enc31->base; +} + +static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( + uint8_t inst, + struct dc_context *ctx) +{ + struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31; + + /* allocate HPO link encoder */ + hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + + hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, + &hpo_dp_link_enc_regs[inst], + &hpo_dp_le_shift, &hpo_dp_le_mask); + + return &hpo_dp_enc31->base; +} + static struct dce_hwseq *dcn31_hwseq_create( struct dc_context *ctx) { @@ -1302,6 +1470,13 @@ static struct dce_hwseq *dcn31_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; + /* DCN3.1 FPGA Workaround + * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
+ * To do so, move calling function enable_stream_timing to only be done AFTER calling + * function core_link_enable_stream + */ + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) + hws->wa.dp_hpo_and_otg_sequence = true; } return hws; } @@ -1309,6 +1484,8 @@ static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = dcn31_create_audio, .create_stream_encoder = dcn31_stream_encoder_create, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, .create_hwseq = dcn31_hwseq_create, }; @@ -1316,6 +1493,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .read_dce_straps = NULL, .create_audio = NULL, .create_stream_encoder = NULL, + .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create, + .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create, .create_hwseq = dcn31_hwseq_create, }; @@ -1338,6 +1517,28 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool) } } + for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) { + if (pool->base.hpo_dp_stream_enc[i] != NULL) { + if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg)); + pool->base.hpo_dp_stream_enc[i]->vpg = NULL; + } + if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) { + kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg)); + pool->base.hpo_dp_stream_enc[i]->apg = NULL; + } + kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i])); + pool->base.hpo_dp_stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) { + if (pool->base.hpo_dp_link_enc[i] != NULL) { + kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i])); + pool->base.hpo_dp_link_enc[i] = NULL; + } + } + for (i = 0; i < pool->base.res_cap->num_dsc; i++) { if (pool->base.dscs[i] != NULL) dcn20_dsc_destroy(&pool->base.dscs[i]); @@ -1584,6 +1785,13 @@ static int dcn31_populate_dml_pipes_from_context( pipe = &res_ctx->pipe_ctx[i]; timing = &pipe->stream->timing; + /* + * Immediate flip can be set dynamically after enabling the plane. + * We need to require support for immediate flip or underflow can be + * intermittently experienced depending on peak b/w requirements. 
+ */ + pipes[pipe_cnt].pipe.src.immediate_flip = true; + + pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.src.gpuvm = true; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; @@ -1626,7 +1834,7 @@ static int dcn31_populate_dml_pipes_from_context( return pipe_cnt; } -static void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) { if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) { context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us; @@ -1647,6 +1855,15 @@ static void dcn31_calculate_wm_and_dlg_fp( if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk) dcfclk = context->bw_ctx.dml.soc.min_dcfclk; + /* We don't recalculate clocks for 0-pipe configs, which can block + * S0i3, as high clocks will block low power states. + * Override any clocks that can block S0i3 to min here. + */ + if (pipe_cnt == 0) { + context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0 + return; + } + pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = dcfclk; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; @@ -1761,7 +1978,7 @@ static void dcn31_calculate_wm_and_dlg_fp( dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); } -static void dcn31_calculate_wm_and_dlg( +void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, @@ -1772,6 +1989,58 @@ static void dcn31_calculate_wm_and_dlg( DC_FP_END(); } +bool dcn31_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + if (pipe_cnt == 0) + fast_validate = false; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + kfree(pipes); + + BW_VAL_TRACE_FINISH(); + + return out; +} + static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap }; @@ -1854,7 +2123,7 @@ static struct resource_funcs dcn31_res_pool_funcs = { .link_encs_assign = link_enc_cfg_link_encs_assign, .link_enc_unassign = link_enc_cfg_link_enc_unassign, .panel_cntl_create = dcn31_panel_cntl_create, - .validate_bandwidth = dcn30_validate_bandwidth, + .validate_bandwidth = dcn31_validate_bandwidth, .calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn31_populate_dml_pipes_from_context, @@ -1929,6 +2198,8 @@ static bool dcn31_resource_construct( dc->caps.max_slave_rgb_planes = 1; dc->caps.post_blend_color_processing = true; 
dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.dp_hpo = true; + dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.is_apu = true; @@ -2167,6 +2438,13 @@ static bool dcn31_resource_construct( pool->base.sw_i2cs[i] = NULL; } + if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && + dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && + !dc->debug.dpia_debug.bits.disable_dpia) { + /* YELLOW CARP B0 has 4 DPIA's */ + pool->base.usb4_dpia_count = 4; + } + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ if (!resource_construct(num_virtual_links, dc, &pool->base, (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? @@ -2183,6 +2461,8 @@ static bool dcn31_resource_construct( dc->cap_funcs = cap_funcs; + dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; + DC_FP_END(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h index 93571c976996..416fe7a721d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h @@ -35,6 +35,16 @@ struct dcn31_resource_pool { struct resource_pool base; }; +bool dcn31_validate_bandwidth(struct dc *dc, + struct dc_state *context, + bool fast_validate); +void dcn31_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); +void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context); + struct resource_pool *dcn31_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c new file mode 100644 index 000000000000..f1deb1c3c363 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c @@ -0,0 +1,87 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc_bios_types.h" +#include "dcn30/dcn30_vpg.h" +#include "dcn31_vpg.h" +#include "reg_helper.h" +#include "dc/dc.h" + +#define DC_LOGGER \ + vpg31->base.ctx->logger + +#define REG(reg)\ + (vpg31->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + vpg31->vpg_shift->field_name, vpg31->vpg_mask->field_name + + +#define CTX \ + vpg31->base.ctx + +static struct vpg_funcs dcn31_vpg_funcs = { + .update_generic_info_packet = vpg3_update_generic_info_packet, + .vpg_poweron = vpg31_poweron, + .vpg_powerdown = vpg31_powerdown, +}; + +void vpg31_powerdown(struct vpg *vpg) +{ + struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); + + if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) + return; + + REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 0, VPG_GSP_LIGHT_SLEEP_FORCE, 1); +} + +void vpg31_poweron(struct vpg *vpg) +{ + struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); + + if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) + return; + + REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0); +} + +void vpg31_construct(struct dcn31_vpg *vpg31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_vpg_registers *vpg_regs, + const struct dcn31_vpg_shift *vpg_shift, + const struct dcn31_vpg_mask *vpg_mask) +{ + vpg31->base.ctx = ctx; + + vpg31->base.inst = inst; + vpg31->base.funcs = &dcn31_vpg_funcs; + + vpg31->regs = vpg_regs; + vpg31->vpg_shift = vpg_shift; + vpg31->vpg_mask = vpg_mask; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h new file mode 100644 index 000000000000..0e76eabce441 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h @@ -0,0 +1,162 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DCN31_VPG_H__ +#define __DAL_DCN31_VPG_H__ + + +#define DCN31_VPG_FROM_VPG(vpg)\ + container_of(vpg, struct dcn31_vpg, base) + +#define VPG_DCN31_REG_LIST(id) \ + SRI(VPG_GENERIC_STATUS, VPG, id), \ + SRI(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \ + SRI(VPG_GENERIC_PACKET_DATA, VPG, id), \ + SRI(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \ + SRI(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id), \ + SRI(VPG_MEM_PWR, VPG, id) + +struct dcn31_vpg_registers { + uint32_t VPG_GENERIC_STATUS; + uint32_t VPG_GENERIC_PACKET_ACCESS_CTRL; + uint32_t VPG_GENERIC_PACKET_DATA; + uint32_t VPG_GSP_FRAME_UPDATE_CTRL; + uint32_t VPG_GSP_IMMEDIATE_UPDATE_CTRL; + uint32_t VPG_MEM_PWR; +}; + +#define DCN31_VPG_MASK_SH_LIST(mask_sh)\ + SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL, VPG_GENERIC_DATA_INDEX, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE0, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE1, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE2, mask_sh),\ + SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE3, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC0_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC1_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC2_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC3_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC4_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC5_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC6_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC7_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC8_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC9_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC10_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC11_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC12_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC13_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC14_FRAME_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC8_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC9_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC10_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC11_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, 
VPG_GENERIC12_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC13_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC14_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, mask_sh),\ + SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_LIGHT_SLEEP_FORCE, mask_sh),\ + SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, mask_sh) + +#define VPG_DCN31_REG_FIELD_LIST(type) \ + type VPG_GENERIC_CONFLICT_OCCURED;\ + type VPG_GENERIC_CONFLICT_CLR;\ + type VPG_GENERIC_DATA_INDEX;\ + type VPG_GENERIC_DATA_BYTE0;\ + type VPG_GENERIC_DATA_BYTE1;\ + type VPG_GENERIC_DATA_BYTE2;\ + type VPG_GENERIC_DATA_BYTE3;\ + type VPG_GENERIC0_FRAME_UPDATE;\ + type VPG_GENERIC1_FRAME_UPDATE;\ + type VPG_GENERIC2_FRAME_UPDATE;\ + type VPG_GENERIC3_FRAME_UPDATE;\ + type VPG_GENERIC4_FRAME_UPDATE;\ + type VPG_GENERIC5_FRAME_UPDATE;\ + type VPG_GENERIC6_FRAME_UPDATE;\ + type VPG_GENERIC7_FRAME_UPDATE;\ + type VPG_GENERIC8_FRAME_UPDATE;\ + type VPG_GENERIC9_FRAME_UPDATE;\ + type VPG_GENERIC10_FRAME_UPDATE;\ + type VPG_GENERIC11_FRAME_UPDATE;\ + type VPG_GENERIC12_FRAME_UPDATE;\ + type VPG_GENERIC13_FRAME_UPDATE;\ + type VPG_GENERIC14_FRAME_UPDATE;\ + type VPG_GENERIC0_IMMEDIATE_UPDATE;\ + type VPG_GENERIC1_IMMEDIATE_UPDATE;\ + type VPG_GENERIC2_IMMEDIATE_UPDATE;\ + type VPG_GENERIC3_IMMEDIATE_UPDATE;\ + type VPG_GENERIC4_IMMEDIATE_UPDATE;\ + type VPG_GENERIC5_IMMEDIATE_UPDATE;\ + type VPG_GENERIC6_IMMEDIATE_UPDATE;\ + type VPG_GENERIC7_IMMEDIATE_UPDATE;\ + type VPG_GENERIC8_IMMEDIATE_UPDATE;\ + type VPG_GENERIC9_IMMEDIATE_UPDATE;\ + type VPG_GENERIC10_IMMEDIATE_UPDATE;\ + type VPG_GENERIC11_IMMEDIATE_UPDATE;\ + type VPG_GENERIC12_IMMEDIATE_UPDATE;\ + type VPG_GENERIC13_IMMEDIATE_UPDATE;\ + type VPG_GENERIC14_IMMEDIATE_UPDATE;\ + type VPG_GSP_MEM_LIGHT_SLEEP_DIS;\ + type VPG_GSP_LIGHT_SLEEP_FORCE;\ + type VPG_GSP_MEM_PWR_STATE + +struct dcn31_vpg_shift { + VPG_DCN31_REG_FIELD_LIST(uint8_t); +}; + +struct dcn31_vpg_mask { + VPG_DCN31_REG_FIELD_LIST(uint32_t); +}; + +struct dcn31_vpg { + struct vpg base; + const struct dcn31_vpg_registers *regs; + const struct dcn31_vpg_shift *vpg_shift; + const struct dcn31_vpg_mask *vpg_mask; +}; + +void vpg31_poweron( + struct vpg *vpg); + +void vpg31_powerdown( + struct vpg *vpg); + +void vpg31_construct(struct dcn31_vpg *vpg31, + struct dc_context *ctx, + uint32_t inst, + const struct dcn31_vpg_registers *vpg_regs, + const struct dcn31_vpg_shift *vpg_shift, + const struct dcn31_vpg_mask *vpg_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index a9170b9f84d3..511f9e1159c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -35,8 +35,11 @@ struct cp_psp_stream_config { uint8_t link_enc_idx; uint8_t stream_enc_idx; uint8_t phy_idx; + uint8_t dio_output_idx; + uint8_t dio_output_type; uint8_t assr_enabled; uint8_t mst_enabled; + uint8_t dp2_enabled; void *dm_stream_ctx; bool dpms_off; }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 9ab854293ace..0fe66b080a03 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -160,6 +160,12 @@ void dm_set_dcn_clocks( struct dc_context *ctx, struct dc_clocks *clks); +#if defined(CONFIG_DRM_AMD_DC_DCN) +void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable); +#endif + +void dm_set_phyd32clk(struct dc_context *ctx, 
int freq_khz); + bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable); void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us); @@ -173,4 +179,9 @@ int dm_helper_dmub_aux_transfer_sync( const struct dc_link *link, struct aux_payload *payload, enum aux_return_code_type *operation_result); +enum set_config_status; +int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, + const struct dc_link *link, + struct set_config_cmd_payload *payload, + enum set_config_status *operation_result); #endif /* __DM_HELPERS__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 56055df2e8d2..eee6672bd32d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -58,7 +58,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) ifdef CONFIG_DRM_AMD_DC_DCN CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) @@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags) @@ -83,7 +85,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags) endif CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) @@ -93,12 +97,14 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags) DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ ifdef CONFIG_DRM_AMD_DC_DCN +DML += dcn20/dcn20_fpu.o DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o -DML += dcn2x/dcn2x.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o +DML += dcn301/dcn301_fpu.o +DML += dsc/rc_calc_fpu.o endif AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index c58522436291..d590dc917363 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -26,7 +26,7 @@ #include "resource.h" -#include "dcn2x.h" +#include "dcn20_fpu.h" /** * DOC: DCN2x FPU manipulation Overview diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h index 331547ba0713..36f26126d574 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h @@ -24,11 +24,11 @@ * */ -#ifndef __DCN2X_H__ -#define __DCN2X_H__ +#ifndef __DCN20_FPU_H__ +#define __DCN20_FPU_H__ void dcn20_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); -#endif /* __DCN2X_H__ */ +#endif /* __DCN20_FPU_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 2091dd8c252d..246071c72f6b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -37,8 +37,8 @@ // static void dml20_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, - display_rq_params_st * rq_param, - const display_pipe_source_params_st pipe_src_param); + display_rq_params_st *rq_param, + const display_pipe_source_params_st *pipe_src_param); // Function: dml20_rq_dlg_get_dlg_params // Calculate deadline related parameters @@ -49,8 +49,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en); /* @@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + 
rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0), @@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st 
*rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_source_params_st pipe_src_param, + const display_pipe_source_params_st *pipe_src_param, bool is_chroma) { bool mode_422 = false; @@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { - vp_width = pipe_src_param.viewport_width_c / ppe; - vp_height = pipe_src_param.viewport_height_c; - data_pitch = pipe_src_param.data_pitch_c; - meta_pitch = pipe_src_param.meta_pitch_c; + vp_width = pipe_src_param->viewport_width_c / ppe; + vp_height = pipe_src_param->viewport_height_c; + data_pitch = pipe_src_param->data_pitch_c; + meta_pitch = pipe_src_param->meta_pitch_c; } else { - vp_width = pipe_src_param.viewport_width / ppe; - vp_height = pipe_src_param.viewport_height; - data_pitch = pipe_src_param.data_pitch; - meta_pitch = pipe_src_param.meta_pitch; + vp_width = pipe_src_param->viewport_width / ppe; + vp_height = pipe_src_param->viewport_height; + data_pitch = pipe_src_param->data_pitch; + meta_pitch = pipe_src_param->meta_pitch; } rq_sizing_param->chunk_bytes = 8192; @@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, vp_height, data_pitch, meta_pitch, - pipe_src_param.source_format, - pipe_src_param.sw_mode, - pipe_src_param.macro_tile_size, - pipe_src_param.source_scan, + pipe_src_param->source_format, + pipe_src_param->sw_mode, + pipe_src_param->macro_tile_size, + pipe_src_param->source_scan, is_chroma); } static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { // get param for luma surface - rq_param->yuv420 = pipe_src_param.source_format == dm_420_8 - || pipe_src_param.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10; + rq_param->yuv420 = pipe_src_param->source_format == dm_420_8 + || pipe_src_param->source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), @@ -751,7 +751,7 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, pipe_src_param, 0); - if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) { + if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), @@ -763,20 +763,20 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, // calculate how to split the det buffer space between luma and chroma handle_det_buf_split(mode_lib, rq_param, pipe_src_param); - print__rq_params_st(mode_lib, *rq_param); + print__rq_params_st(mode_lib, rq_param); } void dml20_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); - dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src); - extract_rq_regs(mode_lib, rq_regs, rq_param); + dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } // Note: currently taken in as is. 
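[Note: the recurring change in these dml20 hunks is mechanical: helpers such as extract_rq_sizing_regs() and dml20_rq_dlg_get_rq_params() stop taking large display_*_st structs by value and take const pointers instead, so '.' accesses become '->' and call sites gain '&'. A minimal sketch of the pattern — the struct and field names below are illustrative, not the real DML types:

struct sizing_params {
	unsigned int chunk_bytes;
	unsigned int min_chunk_bytes;
	/* ...in DML these structs run to hundreds of bytes... */
};

/* Before: the whole struct is copied onto the callee's stack. */
static unsigned int chunk_size_by_value(const struct sizing_params p)
{
	return p.chunk_bytes;
}

/* After: only a pointer is passed; '.' accesses become '->'. */
static unsigned int chunk_size_by_ptr(const struct sizing_params *p)
{
	return p->chunk_bytes;
}

int main(void)
{
	struct sizing_params p = { .chunk_bytes = 8192, .min_chunk_bytes = 1024 };
	unsigned int a = chunk_size_by_value(p);   /* copies the struct */
	unsigned int b = chunk_size_by_ptr(&p);    /* call sites gain '&' */

	return a == b ? 0 : 1;
}

Since every converted callee only reads the parameter, const-qualifying the pointer preserves the old by-value guarantee that the callee cannot modify the caller's data, while avoiding a large stack copy on every call.]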
@@ -787,8 +787,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en) { @@ -935,7 +935,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, * (double) ref_freq_to_pix_freq); ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes); min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); @@ -995,20 +995,20 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // vinit_bot_l = scl.vinit_bot; // vinit_bot_c = scl.vinit_bot_c; -// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; -// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; -// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub; -// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub; +// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; +// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; +// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub; +// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub; -// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; +// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1137,16 +1137,16 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = rq_dlg_param->rq_c.meta_row_height; 
swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1542,14 +1542,14 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1579,20 +1579,20 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src); + dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src); dml20_rq_dlg_get_dlg_params(mode_lib, e2e_pipe_param, num_pipes, pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en); dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h index d0b90947f540..8b23867e97c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h @@ -43,7 +43,7 @@ struct display_mode_lib; void dml20_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg @@ -61,7 +61,7 @@ void dml20_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 1a0c14e465fa..015e7f2c0b16 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -38,7 +38,7 @@ static void dml20v2_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, display_rq_params_st * rq_param, - const display_pipe_source_params_st pipe_src_param); + const display_pipe_source_params_st *pipe_src_param); // Function: dml20v2_rq_dlg_get_dlg_params // 
Calculate deadline related parameters @@ -49,8 +49,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en); /* @@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // TODO: take the max between luma, chroma chunk size? 
// okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0), @@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_source_params_st pipe_src_param, + const display_pipe_source_params_st *pipe_src_param, bool is_chroma) { bool mode_422 = false; @@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { - vp_width = pipe_src_param.viewport_width_c / ppe; - vp_height = pipe_src_param.viewport_height_c; - data_pitch = pipe_src_param.data_pitch_c; - meta_pitch = pipe_src_param.meta_pitch_c; + vp_width = pipe_src_param->viewport_width_c / ppe; + vp_height = pipe_src_param->viewport_height_c; + data_pitch = pipe_src_param->data_pitch_c; + meta_pitch = pipe_src_param->meta_pitch_c; } else { - vp_width = pipe_src_param.viewport_width / ppe; - vp_height = pipe_src_param.viewport_height; - data_pitch = pipe_src_param.data_pitch; - meta_pitch = pipe_src_param.meta_pitch; + vp_width = pipe_src_param->viewport_width / ppe; + vp_height = pipe_src_param->viewport_height; + data_pitch = pipe_src_param->data_pitch; + meta_pitch = pipe_src_param->meta_pitch; } rq_sizing_param->chunk_bytes = 8192; @@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, vp_height, data_pitch, meta_pitch, - pipe_src_param.source_format, - pipe_src_param.sw_mode, - pipe_src_param.macro_tile_size, - pipe_src_param.source_scan, + pipe_src_param->source_format, + pipe_src_param->sw_mode, + pipe_src_param->macro_tile_size, + pipe_src_param->source_scan, 
is_chroma); } static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { // get param for luma surface - rq_param->yuv420 = pipe_src_param.source_format == dm_420_8 - || pipe_src_param.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10; + rq_param->yuv420 = pipe_src_param->source_format == dm_420_8 + || pipe_src_param->source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), @@ -751,7 +751,7 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, pipe_src_param, 0); - if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) { + if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), @@ -763,20 +763,20 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, // calculate how to split the det buffer space between luma and chroma handle_det_buf_split(mode_lib, rq_param, pipe_src_param); - print__rq_params_st(mode_lib, *rq_param); + print__rq_params_st(mode_lib, rq_param); } void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); - dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src); - extract_rq_regs(mode_lib, rq_regs, rq_param); + dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } // Note: currently taken in as is. 
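[Note: the caller side of the conversion is equally uniform: locals such as rq_param are still built by value, and their address — or the address of a nested member like pipe_param->src — is what flows down; the print__*_st debug helpers switch to pointers too, which is why the '*rq_param' and '*rq_regs' dereferences disappear. A small sketch under the same assumptions (hypothetical names, not the real DML API):

/* Hypothetical stand-ins for the DML structs and helpers. */
struct src_params { unsigned int viewport_width; };
struct pipe_params { struct src_params src; };

static void get_rq_params(unsigned int *rq, const struct src_params *src)
{
	*rq = src->viewport_width;          /* callee reads through the pointer */
}

static unsigned int get_rq_reg(const struct pipe_params *pipe)
{
	unsigned int rq = 0;

	get_rq_params(&rq, &pipe->src);     /* nested member passed by address */
	return rq;
}

int main(void)
{
	struct pipe_params pipe = { .src = { .viewport_width = 1920 } };

	return get_rq_reg(&pipe) == 1920 ? 0 : 1;
}
]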
@@ -787,8 +787,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en) { @@ -935,7 +935,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, * (double) ref_freq_to_pix_freq); ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes); min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); @@ -996,20 +996,20 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // vinit_bot_l = scl.vinit_bot; // vinit_bot_c = scl.vinit_bot_c; -// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; -// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; -// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub; -// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub; +// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; +// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; +// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub; +// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub; -// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; +// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1138,16 +1138,16 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = 
rq_dlg_param->rq_c.meta_row_height; swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1543,14 +1543,14 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1580,20 +1580,20 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src); + dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src); dml20v2_rq_dlg_get_dlg_params(mode_lib, e2e_pipe_param, num_pipes, pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en); dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h index 27cf8bed9376..2b4e46ea1c3d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h @@ -43,7 +43,7 @@ struct display_mode_lib; void dml20v2_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg @@ -61,7 +61,7 @@ void dml20v2_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 4136eb8256cb..8a7485e21d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -3394,6 +3394,127 @@ static unsigned int TruncToValidBPP( } } + +static noinline void CalculatePrefetchSchedulePerPlane( + struct display_mode_lib *mode_lib, + int i, + unsigned j, + unsigned k) +{ + struct vba_vars_st *locals = &mode_lib->vba; + 
Pipe myPipe; + HostVM myHostVM; + + if (mode_lib->vba.XFCEnabled[k] == true) { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = + CalculateRemoteSurfaceFlipDelay( + mode_lib, + mode_lib->vba.VRatio[k], + locals->SwathWidthYThisState[k], + dml_ceil(locals->BytePerPixelInDETY[k], 1.0), + mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], + mode_lib->vba.XFCTSlvVupdateOffset, + mode_lib->vba.XFCTSlvVupdateWidth, + mode_lib->vba.XFCTSlvVreadyOffset, + mode_lib->vba.XFCXBUFLatencyTolerance, + mode_lib->vba.XFCFillBWOverhead, + mode_lib->vba.XFCSlvChunkSize, + mode_lib->vba.XFCBusTransportTime, + mode_lib->vba.TimeCalc, + mode_lib->vba.TWait, + &mode_lib->vba.SrcActiveDrainRate, + &mode_lib->vba.TInitXFill, + &mode_lib->vba.TslvChk); + } else { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; + } + + myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k]; + myPipe.DISPCLK = locals->RequiredDISPCLK[i][j]; + myPipe.PixelClock = mode_lib->vba.PixelClock[k]; + myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; + myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k]; + myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k]; + myPipe.SourceScan = mode_lib->vba.SourceScan[k]; + myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k]; + myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k]; + myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k]; + myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k]; + myPipe.InterlaceEnable = mode_lib->vba.Interlace[k]; + myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k]; + myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]; + myPipe.HTotal = mode_lib->vba.HTotal[k]; + + + myHostVM.Enable = mode_lib->vba.HostVMEnable; + myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels; + myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels; + + + mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule( + mode_lib, + mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, + mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, + &myPipe, + locals->DSCDelayPerState[i][k], + mode_lib->vba.DPPCLKDelaySubtotal, + mode_lib->vba.DPPCLKDelaySCL, + mode_lib->vba.DPPCLKDelaySCLLBOnly, + mode_lib->vba.DPPCLKDelayCNVCFormater, + mode_lib->vba.DPPCLKDelayCNVCCursor, + mode_lib->vba.DISPCLKDelaySubtotal, + locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.MaxInterDCNTileRepeaters, + dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]), + locals->MaximumVStartup[0][0][k], + mode_lib->vba.GPUVMMaxPageTableLevels, + mode_lib->vba.GPUVMEnable, + &myHostVM, + mode_lib->vba.DynamicMetadataEnable[k], + mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], + mode_lib->vba.DynamicMetadataTransmittedBytes[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.UrgentLatency, + mode_lib->vba.ExtraLatency, + mode_lib->vba.TimeCalc, + locals->PDEAndMetaPTEBytesPerFrame[0][0][k], + locals->MetaRowBytes[0][0][k], + locals->DPTEBytesPerRow[0][0][k], + locals->PrefetchLinesY[0][0][k], + locals->SwathWidthYThisState[k], + locals->BytePerPixelInDETY[k], + locals->PrefillY[k], + locals->MaxNumSwY[k], + locals->PrefetchLinesC[0][0][k], + locals->BytePerPixelInDETC[k], + locals->PrefillC[k], + locals->MaxNumSwC[k], + locals->SwathHeightYThisState[k], + locals->SwathHeightCThisState[k], + mode_lib->vba.TWait, + mode_lib->vba.XFCEnabled[k], + mode_lib->vba.XFCRemoteSurfaceFlipDelay, 
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + &locals->dst_x_after_scaler, + &locals->dst_y_after_scaler, + &locals->LineTimesForPrefetch[k], + &locals->PrefetchBW[k], + &locals->LinesForMetaPTE[k], + &locals->LinesForMetaAndDPTERow[k], + &locals->VRatioPreY[i][j][k], + &locals->VRatioPreC[i][j][k], + &locals->RequiredPrefetchPixelDataBWLuma[i][j][k], + &locals->RequiredPrefetchPixelDataBWChroma[i][j][k], + &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata, + &locals->Tno_bw[k], + &locals->prefetch_vmrow_bw[k], + locals->swath_width_luma_ub, + locals->swath_width_chroma_ub, + &mode_lib->vba.VUpdateOffsetPix[k], + &mode_lib->vba.VUpdateWidthPix[k], + &mode_lib->vba.VReadyOffsetPix[k]); +} void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) { struct vba_vars_st *locals = &mode_lib->vba; @@ -4676,120 +4797,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.DRAMClockChangeLatency, mode_lib->vba.UrgentLatency, mode_lib->vba.SREnterPlusExitTime); - for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { - Pipe myPipe; - HostVM myHostVM; - - if (mode_lib->vba.XFCEnabled[k] == true) { - mode_lib->vba.XFCRemoteSurfaceFlipDelay = - CalculateRemoteSurfaceFlipDelay( - mode_lib, - mode_lib->vba.VRatio[k], - locals->SwathWidthYThisState[k], - dml_ceil(locals->BytePerPixelInDETY[k], 1.0), - mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], - mode_lib->vba.XFCTSlvVupdateOffset, - mode_lib->vba.XFCTSlvVupdateWidth, - mode_lib->vba.XFCTSlvVreadyOffset, - mode_lib->vba.XFCXBUFLatencyTolerance, - mode_lib->vba.XFCFillBWOverhead, - mode_lib->vba.XFCSlvChunkSize, - mode_lib->vba.XFCBusTransportTime, - mode_lib->vba.TimeCalc, - mode_lib->vba.TWait, - &mode_lib->vba.SrcActiveDrainRate, - &mode_lib->vba.TInitXFill, - &mode_lib->vba.TslvChk); - } else { - mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; - } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) + CalculatePrefetchSchedulePerPlane(mode_lib, i, j, k); - myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k]; - myPipe.DISPCLK = locals->RequiredDISPCLK[i][j]; - myPipe.PixelClock = mode_lib->vba.PixelClock[k]; - myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0]; - myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k]; - myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k]; - myPipe.SourceScan = mode_lib->vba.SourceScan[k]; - myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k]; - myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k]; - myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k]; - myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k]; - myPipe.InterlaceEnable = mode_lib->vba.Interlace[k]; - myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k]; - myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]; - myPipe.HTotal = mode_lib->vba.HTotal[k]; - - - myHostVM.Enable = mode_lib->vba.HostVMEnable; - myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels; - myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels; - - - mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule( - mode_lib, - mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData, - mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly, - &myPipe, - locals->DSCDelayPerState[i][k], - mode_lib->vba.DPPCLKDelaySubtotal, - mode_lib->vba.DPPCLKDelaySCL, - mode_lib->vba.DPPCLKDelaySCLLBOnly, - 
mode_lib->vba.DPPCLKDelayCNVCFormater, - mode_lib->vba.DPPCLKDelayCNVCCursor, - mode_lib->vba.DISPCLKDelaySubtotal, - locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k], - mode_lib->vba.OutputFormat[k], - mode_lib->vba.MaxInterDCNTileRepeaters, - dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]), - locals->MaximumVStartup[0][0][k], - mode_lib->vba.GPUVMMaxPageTableLevels, - mode_lib->vba.GPUVMEnable, - &myHostVM, - mode_lib->vba.DynamicMetadataEnable[k], - mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], - mode_lib->vba.DynamicMetadataTransmittedBytes[k], - mode_lib->vba.DCCEnable[k], - mode_lib->vba.UrgentLatency, - mode_lib->vba.ExtraLatency, - mode_lib->vba.TimeCalc, - locals->PDEAndMetaPTEBytesPerFrame[0][0][k], - locals->MetaRowBytes[0][0][k], - locals->DPTEBytesPerRow[0][0][k], - locals->PrefetchLinesY[0][0][k], - locals->SwathWidthYThisState[k], - locals->BytePerPixelInDETY[k], - locals->PrefillY[k], - locals->MaxNumSwY[k], - locals->PrefetchLinesC[0][0][k], - locals->BytePerPixelInDETC[k], - locals->PrefillC[k], - locals->MaxNumSwC[k], - locals->SwathHeightYThisState[k], - locals->SwathHeightCThisState[k], - mode_lib->vba.TWait, - mode_lib->vba.XFCEnabled[k], - mode_lib->vba.XFCRemoteSurfaceFlipDelay, - mode_lib->vba.ProgressiveToInterlaceUnitInOPP, - &locals->dst_x_after_scaler, - &locals->dst_y_after_scaler, - &locals->LineTimesForPrefetch[k], - &locals->PrefetchBW[k], - &locals->LinesForMetaPTE[k], - &locals->LinesForMetaAndDPTERow[k], - &locals->VRatioPreY[i][j][k], - &locals->VRatioPreC[i][j][k], - &locals->RequiredPrefetchPixelDataBWLuma[i][j][k], - &locals->RequiredPrefetchPixelDataBWChroma[i][j][k], - &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata, - &locals->Tno_bw[k], - &locals->prefetch_vmrow_bw[k], - locals->swath_width_luma_ub, - locals->swath_width_chroma_ub, - &mode_lib->vba.VUpdateOffsetPix[k], - &mode_lib->vba.VUpdateWidthPix[k], - &mode_lib->vba.VReadyOffsetPix[k]); - } mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0; mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0; for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 287e31052b30..46c433c0bcb0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -141,55 +141,55 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs( struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 
0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); rq_regs->rq_regs_l.pte_row_height_linear = dml_floor( - dml_log2(rq_param.dlg.rq_l.dpte_row_height), + dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); rq_regs->rq_regs_c.pte_row_height_linear = dml_floor( - dml_log2(rq_param.dlg.rq_c.dpte_row_height), + dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // FIXME: take the max between luma, chroma chunk size? 
// okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -198,9 +198,9 @@ static void extract_rq_regs( rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple( @@ -215,7 +215,7 @@ static void extract_rq_regs( static void handle_det_buf_split( struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -224,8 +224,8 @@ static void handle_det_buf_split( unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -694,7 +694,7 @@ static void get_surf_rq_param( display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_params_st pipe_param, + const display_pipe_params_st *pipe_param, bool is_chroma) { bool mode_422 = false; @@ -706,30 +706,30 @@ static void get_surf_rq_param( // FIXME check if ppe apply for both luma and chroma in 422 case if (is_chroma) { - vp_width = pipe_param.src.viewport_width_c / ppe; - vp_height = pipe_param.src.viewport_height_c; - data_pitch = pipe_param.src.data_pitch_c; - meta_pitch = pipe_param.src.meta_pitch_c; + vp_width = pipe_param->src.viewport_width_c / ppe; + vp_height = pipe_param->src.viewport_height_c; + data_pitch = pipe_param->src.data_pitch_c; + meta_pitch = pipe_param->src.meta_pitch_c; } else { - vp_width = pipe_param.src.viewport_width / ppe; - vp_height = pipe_param.src.viewport_height; - data_pitch = pipe_param.src.data_pitch; - meta_pitch = pipe_param.src.meta_pitch; + vp_width = pipe_param->src.viewport_width / ppe; + vp_height = pipe_param->src.viewport_height; + data_pitch = pipe_param->src.data_pitch; + meta_pitch = pipe_param->src.meta_pitch; } - if (pipe_param.dest.odm_combine) { + if (pipe_param->dest.odm_combine) { unsigned int access_dir; unsigned int full_src_vp_width; unsigned int hactive_half; unsigned int src_hactive_half; - access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed - hactive_half = pipe_param.dest.hactive / 2; + access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed + hactive_half = pipe_param->dest.hactive / 2; if (is_chroma) { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; - src_hactive_half = 
pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_half; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; + src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_half; } else { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; - src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio * hactive_half; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; + src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio * hactive_half; } if (access_dir == 0) { @@ -754,7 +754,7 @@ static void get_surf_rq_param( rq_sizing_param->meta_chunk_bytes = 2048; rq_sizing_param->min_meta_chunk_bytes = 256; - if (pipe_param.src.hostvm) + if (pipe_param->src.hostvm) rq_sizing_param->mpte_group_bytes = 512; else rq_sizing_param->mpte_group_bytes = 2048; @@ -768,23 +768,23 @@ static void get_surf_rq_param( vp_height, data_pitch, meta_pitch, - pipe_param.src.source_format, - pipe_param.src.sw_mode, - pipe_param.src.macro_tile_size, - pipe_param.src.source_scan, - pipe_param.src.hostvm, + pipe_param->src.source_format, + pipe_param->src.sw_mode, + pipe_param->src.macro_tile_size, + pipe_param->src.source_scan, + pipe_param->src.hostvm, is_chroma); } static void dml_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { // get param for luma surface - rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 - || pipe_param.src.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; + rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 + || pipe_param->src.source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; get_surf_rq_param( mode_lib, @@ -794,7 +794,7 @@ static void dml_rq_dlg_get_rq_params( pipe_param, 0); - if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) { + if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) { // get param for chroma surface get_surf_rq_param( mode_lib, @@ -806,22 +806,22 @@ static void dml_rq_dlg_get_rq_params( } // calculate how to split the det buffer space between luma and chroma - handle_det_buf_split(mode_lib, rq_param, pipe_param.src); - print__rq_params_st(mode_lib, *rq_param); + handle_det_buf_split(mode_lib, rq_param, &pipe_param->src); + print__rq_params_st(mode_lib, rq_param); } void dml21_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param); - extract_rq_regs(mode_lib, rq_regs, rq_param); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } // Note: currently taken in as is. 
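[Note: the display_mode_vba_21.c hunk earlier in this diff is a different kind of cleanup: the body of the per-plane prefetch loop, which declares sizable locals (Pipe, HostVM), is hoisted into a noinline helper, CalculatePrefetchSchedulePerPlane(), so those locals land in the helper's stack frame instead of inflating the already enormous parent function. A compressed sketch of that shape — the types, field names, and arithmetic here are placeholders, not the real VBA code:

/* 'noinline' is a kernel macro; spelled out here for a standalone build. */
#define noinline __attribute__((noinline))

struct pipe_cfg { double dppclk; double dispclk; /* ...many more fields... */ };

static noinline void calc_prefetch_per_plane(double *result, int k)
{
	/* Large locals now live in this frame, released on each return. */
	struct pipe_cfg my_pipe = { .dppclk = 600.0 + k, .dispclk = 1200.0 };

	*result = my_pipe.dppclk / my_pipe.dispclk;
}

static void mode_support_loop(double *results, int nplanes)
{
	for (int k = 0; k < nplanes; k++)
		calc_prefetch_per_plane(results + k, k);
}

int main(void)
{
	double r[3];

	mode_support_loop(r, 3);
	return r[0] > 0.0 ? 0 : 1;
}

Without noinline the compiler could inline the helper back into the caller and merge the stack frames, defeating the point; keeping it out of line preserves the savings in functions that already push against the frame-size warning limit (the same concern behind $(frame_warn_flag) in the dml Makefile above).]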
@@ -833,8 +833,8 @@ static void dml_rq_dlg_get_dlg_params( const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en) { @@ -981,7 +981,7 @@ static void dml_rq_dlg_get_dlg_params( * (double) ref_freq_to_pix_freq); ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13)); - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes); min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); @@ -1042,13 +1042,13 @@ static void dml_rq_dlg_get_dlg_params( scl_enable = scl->scl_enable; line_time_in_us = (htotal / pclk_freq_in_mhz); - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1189,16 +1189,16 @@ static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = rq_dlg_param->rq_c.meta_row_height; swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1650,15 +1650,15 @@ static void dml_rq_dlg_get_dlg_params( disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml21_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, 
const unsigned int pipe_idx, const bool cstate_en, @@ -1691,12 +1691,12 @@ void dml21_rq_dlg_get_dlg_reg( dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); + dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); dml_rq_dlg_get_dlg_params( mode_lib, e2e_pipe_param, @@ -1704,8 +1704,8 @@ void dml21_rq_dlg_get_dlg_reg( pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en); dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h index e8f7785e3fc6..af6ad0ca9cf8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h @@ -44,7 +44,7 @@ struct display_mode_lib; void dml21_rq_dlg_get_rq_reg( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg // Calculate and return DLG and TTU register struct given the system setting @@ -61,7 +61,7 @@ void dml21_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index e3d9f1decdfc..f47d82da115c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3576,16 +3576,9 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } - else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; if (Format == dm_n422) { MinDSCBPP = 7; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index 0d934fae1c3a..aef854270054 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -89,52 +89,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, - const display_data_rq_sizing_params_st rq_sizing) + const display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_DLG: %s: rq_sizing param\n", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) 
rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_rq_params_st rq_param) + const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // FIXME: take the max between luma, chroma chunk size? 
// okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -143,9 +143,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double)rq_param.misc.rq_l.stored_swath_bytes - / (double)rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double)rq_param->misc.rq_l.stored_swath_bytes + / (double)rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma } else { detile_buf_plane1_addr = dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0), @@ -158,7 +158,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_source_params_st pipe_src_param) + const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -167,8 +167,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, unsigned int full_swath_bytes_packed_c = 0; bool req128_l = false; bool req128_c = false; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -747,7 +747,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_params_st pipe_param, + const display_pipe_params_st *pipe_param, bool is_chroma, bool is_alpha) { @@ -761,32 +761,32 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, // FIXME check if ppe apply for both luma and chroma in 422 case if (is_chroma | is_alpha) { - vp_width = pipe_param.src.viewport_width_c / ppe; - vp_height = pipe_param.src.viewport_height_c; - data_pitch = pipe_param.src.data_pitch_c; - meta_pitch = pipe_param.src.meta_pitch_c; - surface_height = pipe_param.src.surface_height_y / 2.0; + vp_width = pipe_param->src.viewport_width_c / ppe; + vp_height = pipe_param->src.viewport_height_c; + data_pitch = pipe_param->src.data_pitch_c; + meta_pitch = pipe_param->src.meta_pitch_c; + surface_height = pipe_param->src.surface_height_y / 2.0; } else { - vp_width = pipe_param.src.viewport_width / ppe; - vp_height = pipe_param.src.viewport_height; - data_pitch = pipe_param.src.data_pitch; - meta_pitch = pipe_param.src.meta_pitch; - surface_height = pipe_param.src.surface_height_y; + vp_width = pipe_param->src.viewport_width / ppe; + vp_height = pipe_param->src.viewport_height; + data_pitch = pipe_param->src.data_pitch; + meta_pitch = pipe_param->src.meta_pitch; + surface_height = pipe_param->src.surface_height_y; } - if (pipe_param.dest.odm_combine) { + if (pipe_param->dest.odm_combine) { 
unsigned int access_dir = 0; unsigned int full_src_vp_width = 0; unsigned int hactive_odm = 0; unsigned int src_hactive_odm = 0; - access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed - hactive_odm = pipe_param.dest.hactive / ((unsigned int)pipe_param.dest.odm_combine*2); + access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed + hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine*2); if (is_chroma) { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm; } else { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm; } if (access_dir == 0) { @@ -815,7 +815,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, rq_sizing_param->meta_chunk_bytes = 2048; rq_sizing_param->min_meta_chunk_bytes = 256; - if (pipe_param.src.hostvm) + if (pipe_param->src.hostvm) rq_sizing_param->mpte_group_bytes = 512; else rq_sizing_param->mpte_group_bytes = 2048; @@ -828,28 +828,28 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, vp_height, data_pitch, meta_pitch, - pipe_param.src.source_format, - pipe_param.src.sw_mode, - pipe_param.src.macro_tile_size, - pipe_param.src.source_scan, - pipe_param.src.hostvm, + pipe_param->src.source_format, + pipe_param->src.sw_mode, + pipe_param->src.macro_tile_size, + pipe_param->src.source_scan, + pipe_param->src.hostvm, is_chroma, surface_height); } static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { // get param for luma surface - rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 - || pipe_param.src.source_format == dm_420_10 - || pipe_param.src.source_format == dm_rgbe_alpha - || pipe_param.src.source_format == dm_420_12; + rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 + || pipe_param->src.source_format == dm_420_10 + || pipe_param->src.source_format == dm_rgbe_alpha + || pipe_param->src.source_format == dm_420_12; - rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; - rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha)?1:0; + rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha)?1:0; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), @@ -859,7 +859,7 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, 0, 0); - if (is_dual_plane((enum source_format_class)(pipe_param.src.source_format))) { + if (is_dual_plane((enum source_format_class)(pipe_param->src.source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), @@ -871,21 +871,21 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, } // calculate how to split the det buffer space 
between luma and chroma - handle_det_buf_split(mode_lib, rq_param, pipe_param.src); - print__rq_params_st(mode_lib, *rq_param); + handle_det_buf_split(mode_lib, rq_param, &pipe_param->src); + print__rq_params_st(mode_lib, rq_param); } void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param) + const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = { 0 }; memset(rq_regs, 0, sizeof(*rq_regs)); dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param); - extract_rq_regs(mode_lib, rq_regs, rq_param); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, @@ -1824,14 +1824,14 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1861,12 +1861,12 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); + dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); dml_rq_dlg_get_dlg_params(mode_lib, e2e_pipe_param, num_pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h index c04965cceff3..625e41f8d575 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h @@ -41,7 +41,7 @@ struct display_mode_lib; // See also: <display_rq_regs_st> void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg // Calculate and return DLG and TTU register struct given the system setting @@ -57,7 +57,7 @@ void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c new file mode 100644 index 000000000000..94c32832a0e7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -0,0 +1,390 @@ +/* + * Copyright 
2019-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "resource.h" +#include "clk_mgr.h" +#include "dcn20/dcn20_resource.h" +#include "dcn301/dcn301_resource.h" + +#include "dml/dcn20/dcn20_fpu.h" +#include "dcn301_fpu.h" + +#define TO_DCN301_RES_POOL(pool)\ + container_of(pool, struct dcn301_resource_pool, base) + +/* Based on: //vidip/dc/dcn3/doc/architecture/DCN3x_Display_Mode.xlsm#83 */ +struct _vcs_dpi_ip_params_st dcn3_01_ip = { + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 1, + .gpuvm_max_page_table_levels = 1, + .hostvm_max_page_table_levels = 2, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .num_dsc = 3, + .rob_buffer_size_kbytes = 184, + .det_buffer_size_kbytes = 184, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 32, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 8, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, // ? + .line_buffer_fixed_bpp = 48, // ? 
+ .dcc_supported = true, + .writeback_interface_buffer_size_kbytes = 90, + .writeback_line_buffer_buffer_size = 656640, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 4, + .max_num_dpp = 4, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.11, + .min_vblank_lines = 32, + .dppclk_delay_subtotal = 46, + .dynamic_metadata_vm_enabled = true, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dcfclk_cstate_latency = 5.2, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .max_num_hdmi_frl_outputs = 0, + .odm_combine_4to1_supported = true, + + .xfc_supported = false, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .gfx7_compat_tiling_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = { + .clock_limits = { + { + .state = 0, + .dram_speed_mts = 2400.0, + .fabricclk_mhz = 600, + .socclk_mhz = 278.0, + .dcfclk_mhz = 400.0, + .dscclk_mhz = 206.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 600.0, + }, + + { + .state = 1, + .dram_speed_mts = 2400.0, + .fabricclk_mhz = 688, + .socclk_mhz = 278.0, + .dcfclk_mhz = 400.0, + .dscclk_mhz = 206.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 600.0, + }, + + { + .state = 2, + .dram_speed_mts = 4267.0, + .fabricclk_mhz = 1067, + .socclk_mhz = 278.0, + .dcfclk_mhz = 608.0, + .dscclk_mhz = 296.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 810.0, + }, + + { + .state = 3, + .dram_speed_mts = 4267.0, + .fabricclk_mhz = 1067, + .socclk_mhz = 715.0, + .dcfclk_mhz = 676.0, + .dscclk_mhz = 338.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 810.0, + }, + + { + .state = 4, + .dram_speed_mts = 4267.0, + .fabricclk_mhz = 1067, + .socclk_mhz = 953.0, + .dcfclk_mhz = 810.0, + .dscclk_mhz = 338.0, + .dppclk_mhz = 1015.0, + .dispclk_mhz = 1015.0, + .phyclk_mhz = 810.0, + }, + }, + + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 60.0, + .writeback_latency_us = 12.0, 
+ .max_request_size_bytes = 256, + .dram_channel_width_bytes = 4, + .fabric_datapath_to_dcn_data_return_bytes = 32, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 191, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 4, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .dram_clock_change_latency_us = 23.84, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3550, + .xfc_bus_transport_time_us = 20, // ? + .xfc_xbuf_latency_tolerance_us = 4, // ? + .use_urgent_burst_bw = 1, // ? + .num_states = 5, + .do_urgent_latency_adjustment = false, + .urgent_latency_adjustment_fabric_clock_component_us = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, +}; + +static void calculate_wm_set_for_vlevel(int vlevel, + struct wm_range_table_entry *table_entry, + struct dcn_watermarks *wm_set, + struct display_mode_lib *dml, + display_e2e_pipe_params_st *pipes, + int pipe_cnt) +{ + double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us; + + ASSERT(vlevel < dml->soc.num_states); + /* only pipe 0 is read for voltage and dcf/soc clocks */ + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz; + + dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us; + dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us; + dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us; + + wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; + wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; + wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; + wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; + dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; + +} + +void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool); + struct clk_limit_table *clk_table = &bw_params->clk_table; + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + unsigned int i, closest_clk_lvl; + int j; + + dc_assert_fp_enabled(); + + /* Default clock levels are used for diags, which may lead to overclocking. 
*/ + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { + dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator; + dcn3_01_ip.max_num_dpp = pool->base.pipe_count; + dcn3_01_soc.num_chans = bw_params->num_channels; + + ASSERT(clk_table->num_entries); + for (i = 0; i < clk_table->num_entries; i++) { + /* loop backwards*/ + for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) { + if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { + closest_clk_lvl = j; + break; + } + } + + clock_limits[i].state = i; + clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; + clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; + clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; + clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; + + clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz; + clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz; + clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; + clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz; + clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; + clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; + clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz; + } + + for (i = 0; i < clk_table->num_entries; i++) + dcn3_01_soc.clock_limits[i] = clock_limits[i]; + + if (clk_table->num_entries) { + dcn3_01_soc.num_states = clk_table->num_entries; + /* duplicate last level */ + dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1]; + dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states; + } + } + + dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + + dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30); +} + +void dcn301_fpu_set_wm_ranges(int i, + struct pp_smu_wm_range_sets *ranges, + struct _vcs_dpi_soc_bounding_box_st *loaded_bb) +{ + dc_assert_fp_enabled(); + + ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? 
(loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0; + ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16; +} + +void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info) +{ + dc_assert_fp_enabled(); + + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10; +} + +void dcn301_calculate_wm_and_dlg(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req) +{ + int i, pipe_idx; + int vlevel, vlevel_max; + struct wm_range_table_entry *table_entry; + struct clk_bw_params *bw_params = dc->clk_mgr->bw_params; + + ASSERT(bw_params); + dc_assert_fp_enabled(); + + vlevel_max = bw_params->clk_table.num_entries - 1; + + /* WM Set D */ + table_entry = &bw_params->wm_table.entries[WM_D]; + if (table_entry->wm_type == WM_TYPE_RETRAINING) + vlevel = 0; + else + vlevel = vlevel_max; + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set C */ + table_entry = &bw_params->wm_table.entries[WM_C]; + vlevel = min(max(vlevel_req, 2), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c, + &context->bw_ctx.dml, pipes, pipe_cnt); + /* WM Set B */ + table_entry = &bw_params->wm_table.entries[WM_B]; + vlevel = min(max(vlevel_req, 1), vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b, + &context->bw_ctx.dml, pipes, pipe_cnt); + + /* WM Set A */ + table_entry = &bw_params->wm_table.entries[WM_A]; + vlevel = min(vlevel_req, vlevel_max); + calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a, + &context->bw_ctx.dml, pipes, pipe_cnt); + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + pipe_idx++; + } + + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h new file mode 100644 index 000000000000..fc7065d17842 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN301_FPU_H__ +#define __DCN301_FPU_H__ + +void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +void dcn301_fpu_set_wm_ranges(int i, + struct pp_smu_wm_range_sets *ranges, + struct _vcs_dpi_soc_bounding_box_st *loaded_bb); + +void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info); + +void dcn301_calculate_wm_and_dlg(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel_req); +#endif /* __DCN301_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index ce55c9caf9a2..7e937bdcea00 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3892,15 +3892,11 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16; } else { - if (Output == dm_hdmi) { - NonDSCBPP0 = 24; - NonDSCBPP1 = 24; - NonDSCBPP2 = 24; - } else { - NonDSCBPP0 = 16; - NonDSCBPP1 = 20; - NonDSCBPP2 = 24; - } + + NonDSCBPP0 = 16; + NonDSCBPP1 = 20; + NonDSCBPP2 = 24; + if (Format == dm_n422) { MinDSCBPP = 7; MaxDSCBPP = 2 * DSCInputBitPerComponent - 1.0 / 16.0; @@ -5398,9 +5394,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->MaximumReadBandwidthWithPrefetch = v->MaximumReadBandwidthWithPrefetch - + dml_max4( - v->VActivePixelBandwidth[i][j][k], - v->VActiveCursorBandwidth[i][j][k] + + dml_max3( + v->VActivePixelBandwidth[i][j][k] + + v->VActiveCursorBandwidth[i][j][k] + v->NoOfDPP[i][j][k] * (v->meta_row_bandwidth[i][j][k] + v->dpte_row_bandwidth[i][j][k]), diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c index c23905bc733a..e0fecf127bd5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c @@ -175,47 +175,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si return (4 * 1024); } -static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st rq_sizing) +static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, 
display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st *rq_sizing) { print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } -static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st rq_param) +static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); - rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), 1) - 3; + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3; - if (rq_param.yuv420) { - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); - rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), 1) - 3; + if (rq_param->yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3; } - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); // FIXME: take the max between luma, chroma chunk size? 
// okay for now, as we are setting chunk_bytes to 8kb anyways - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -225,8 +225,8 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s rq_regs->crq_expansion_mode = 1; // Note: detile_buf_plane1_addr is in unit of 1KB - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma #ifdef __DML_RQ_DLG_CALC_DEBUG__ dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr); @@ -244,14 +244,14 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes); dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d\n", __func__, detile_buf_plane1_addr); dml_print("DML_DLG: %s: plane1_base_address = %0d\n", __func__, rq_regs->plane1_base_address); - dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_l.stored_swath_bytes); - dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_c.stored_swath_bytes); - dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param.dlg.rq_l.swath_height); - dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param.dlg.rq_c.swath_height); + dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes); + dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes); + dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param->dlg.rq_l.swath_height); + dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param->dlg.rq_c.swath_height); #endif } -static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st pipe_src_param) +static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -260,8 +260,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_p unsigned int full_swath_bytes_packed_c = 0; bool req128_l = 0; bool req128_c = 0; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -738,7 +738,7 @@ static void get_surf_rq_param( display_data_rq_sizing_params_st *rq_sizing_param, display_data_rq_dlg_params_st *rq_dlg_param, display_data_rq_misc_params_st *rq_misc_param, - const display_pipe_params_st pipe_param, + const display_pipe_params_st 
*pipe_param, bool is_chroma, bool is_alpha) { @@ -752,33 +752,33 @@ static void get_surf_rq_param( // FIXME check if ppe apply for both luma and chroma in 422 case if (is_chroma | is_alpha) { - vp_width = pipe_param.src.viewport_width_c / ppe; - vp_height = pipe_param.src.viewport_height_c; - data_pitch = pipe_param.src.data_pitch_c; - meta_pitch = pipe_param.src.meta_pitch_c; - surface_height = pipe_param.src.surface_height_y / 2.0; + vp_width = pipe_param->src.viewport_width_c / ppe; + vp_height = pipe_param->src.viewport_height_c; + data_pitch = pipe_param->src.data_pitch_c; + meta_pitch = pipe_param->src.meta_pitch_c; + surface_height = pipe_param->src.surface_height_y / 2.0; } else { - vp_width = pipe_param.src.viewport_width / ppe; - vp_height = pipe_param.src.viewport_height; - data_pitch = pipe_param.src.data_pitch; - meta_pitch = pipe_param.src.meta_pitch; - surface_height = pipe_param.src.surface_height_y; + vp_width = pipe_param->src.viewport_width / ppe; + vp_height = pipe_param->src.viewport_height; + data_pitch = pipe_param->src.data_pitch; + meta_pitch = pipe_param->src.meta_pitch; + surface_height = pipe_param->src.surface_height_y; } - if (pipe_param.dest.odm_combine) { + if (pipe_param->dest.odm_combine) { unsigned int access_dir; unsigned int full_src_vp_width; unsigned int hactive_odm; unsigned int src_hactive_odm; - access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed - hactive_odm = pipe_param.dest.hactive / ((unsigned int) pipe_param.dest.odm_combine * 2); + access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed + hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine * 2); if (is_chroma) { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm; } else { - full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width; - src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm; + full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width; + src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm; } if (access_dir == 0) { @@ -808,7 +808,7 @@ static void get_surf_rq_param( rq_sizing_param->meta_chunk_bytes = 2048; rq_sizing_param->min_meta_chunk_bytes = 256; - if (pipe_param.src.hostvm) + if (pipe_param->src.hostvm) rq_sizing_param->mpte_group_bytes = 512; else rq_sizing_param->mpte_group_bytes = 2048; @@ -822,46 +822,46 @@ static void get_surf_rq_param( vp_height, data_pitch, meta_pitch, - pipe_param.src.source_format, - pipe_param.src.sw_mode, - pipe_param.src.macro_tile_size, - pipe_param.src.source_scan, - pipe_param.src.hostvm, + pipe_param->src.source_format, + pipe_param->src.sw_mode, + pipe_param->src.macro_tile_size, + pipe_param->src.source_scan, + pipe_param->src.hostvm, is_chroma, surface_height); } -static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st pipe_param) +static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st *pipe_param) { // get param for luma 
surface - rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 || pipe_param.src.source_format == dm_420_10 || pipe_param.src.source_format == dm_rgbe_alpha - || pipe_param.src.source_format == dm_420_12; + rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 || pipe_param->src.source_format == dm_420_10 || pipe_param->src.source_format == dm_rgbe_alpha + || pipe_param->src.source_format == dm_420_12; - rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10; - rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha) ? 1 : 0; + rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 1 : 0; get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), &(rq_param->dlg.rq_l), &(rq_param->misc.rq_l), pipe_param, 0, 0); - if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) { + if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) { // get param for chroma surface get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), &(rq_param->dlg.rq_c), &(rq_param->misc.rq_c), pipe_param, 1, rq_param->rgbe_alpha); } // calculate how to split the det buffer space between luma and chroma - handle_det_buf_split(mode_lib, rq_param, pipe_param.src); - print__rq_params_st(mode_lib, *rq_param); + handle_det_buf_split(mode_lib, rq_param, &pipe_param->src); + print__rq_params_st(mode_lib, rq_param); } -void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st pipe_param) +void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st *pipe_param) { display_rq_params_st rq_param = {0}; memset(rq_regs, 0, sizeof(*rq_regs)); dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param); - extract_rq_regs(mode_lib, rq_regs, rq_param); + extract_rq_regs(mode_lib, rq_regs, &rq_param); - print__rq_regs_st(mode_lib, *rq_regs); + print__rq_regs_st(mode_lib, rq_regs); } static void calculate_ttu_cursor( @@ -943,8 +943,8 @@ static void dml_rq_dlg_get_dlg_params( const unsigned int pipe_idx, display_dlg_regs_st *disp_dlg_regs, display_ttu_regs_st *disp_ttu_regs, - const display_rq_dlg_params_st rq_dlg_param, - const display_dlg_sys_params_st dlg_sys_param, + const display_rq_dlg_params_st *rq_dlg_param, + const display_dlg_sys_params_st *dlg_sys_param, const bool cstate_en, const bool pstate_en, const bool vm_en, @@ -1112,13 +1112,13 @@ static void dml_rq_dlg_get_dlg_params( vratio_c = scl->vscl_ratio_c; scl_enable = scl->scl_enable; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; - dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub; vupdate_offset = dst->vupdate_offset; vupdate_width = dst->vupdate_width; vready_offset = dst->vready_offset; @@ -1239,16 +1239,16 @@ 
static void dml_rq_dlg_get_dlg_params( dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c); // Active - req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; - req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; - meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; - meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param->rq_l.meta_row_height; + meta_row_height_c = rq_dlg_param->rq_c.meta_row_height; swath_width_pixels_ub_l = 0; swath_width_pixels_ub_c = 0; scaler_rec_in_width_l = 0; scaler_rec_in_width_c = 0; - dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; - dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height; if (mode_422) { swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element @@ -1669,15 +1669,15 @@ static void dml_rq_dlg_get_dlg_params( disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } void dml31_rq_dlg_get_dlg_reg( struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -1699,12 +1699,12 @@ void dml31_rq_dlg_get_dlg_reg( dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib, e2e_pipe_param, num_pipes); dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - print__dlg_sys_params_st(mode_lib, dlg_sys_param); + print__dlg_sys_params_st(mode_lib, &dlg_sys_param); // system parameter calculation done dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); - dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe); + dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe); dml_rq_dlg_get_dlg_params( mode_lib, e2e_pipe_param, @@ -1712,8 +1712,8 @@ void dml31_rq_dlg_get_dlg_reg( pipe_idx, dlg_regs, ttu_regs, - rq_param.dlg, - dlg_sys_param, + &rq_param.dlg, + &dlg_sys_param, cstate_en, pstate_en, vm_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h index adf8518f761f..8ee991351699 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h @@ -41,7 +41,7 @@ struct display_mode_lib; // See also: <display_rq_regs_st> void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); // Function: dml_rq_dlg_get_dlg_reg // Calculate and return DLG and TTU register struct given the system setting @@ -57,7 +57,7 @@ void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, void dml31_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const 
display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index 1051ca1a23b8..edb9f7567d6d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -80,11 +80,11 @@ enum dm_swizzle_mode { dm_sw_SPARE_13 = 24, dm_sw_64kb_s_x = 25, dm_sw_64kb_d_x = 26, - dm_sw_SPARE_14 = 27, + dm_sw_64kb_r_x = 27, dm_sw_SPARE_15 = 28, dm_sw_var_s_x = 29, dm_sw_var_d_x = 30, - dm_sw_64kb_r_x, + dm_sw_var_r_x = 31, dm_sw_gfx7_2d_thin_l_vp, dm_sw_gfx7_2d_thin_gl, }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 8a5bd919aec8..30db51fbd8cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -82,6 +82,7 @@ void dml_init_instance(struct display_mode_lib *lib, lib->project = project; switch (project) { case DML_PROJECT_NAVI10: + case DML_PROJECT_DCN201: lib->funcs = dml20_funcs; break; case DML_PROJECT_NAVI10v2: diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index d42a0aeca6be..d76251fd1566 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -36,6 +36,7 @@ enum dml_project { DML_PROJECT_RAVEN1, DML_PROJECT_NAVI10, DML_PROJECT_NAVI10v2, + DML_PROJECT_DCN201, DML_PROJECT_DCN21, DML_PROJECT_DCN30, DML_PROJECT_DCN31, @@ -49,7 +50,7 @@ struct dml_funcs { struct display_mode_lib *mode_lib, display_dlg_regs_st *dlg_regs, display_ttu_regs_st *ttu_regs, - display_e2e_pipe_params_st *e2e_pipe_param, + const display_e2e_pipe_params_st *e2e_pipe_param, const unsigned int num_pipes, const unsigned int pipe_idx, const bool cstate_en, @@ -60,7 +61,7 @@ struct dml_funcs { void (*rq_dlg_get_rq_reg)( struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, - const display_pipe_params_st pipe_param); + const display_pipe_params_st *pipe_param); void (*recalculate)(struct display_mode_lib *mode_lib); void (*validate)(struct display_mode_lib *mode_lib); }; @@ -72,6 +73,7 @@ struct display_mode_lib { struct vba_vars_st vba; struct dal_logger *logger; struct dml_funcs funcs; + struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6]; }; void dml_init_instance(struct display_mode_lib *lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c index e2d82aacd3bc..71ea503cb32f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c @@ -26,371 +26,371 @@ #include "display_rq_dlg_helpers.h" #include "dml_logger.h" -void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param) +void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param) { dml_print("DML_RQ_DLG_CALC: ***************************\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n"); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_l); + print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n"); - print__data_rq_sizing_params_st(mode_lib, 
rq_param.sizing.rq_c); + print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_c); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_l); + print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_c); + print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_c); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_l); + print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_c); + print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_c); dml_print("DML_RQ_DLG_CALC: ***************************\n"); } -void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing) +void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n"); - dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing.chunk_bytes); - dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing.min_chunk_bytes); - dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing.meta_chunk_bytes); + dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing->chunk_bytes); + dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing->min_chunk_bytes); + dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing->meta_chunk_bytes); dml_print( "DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n", - rq_sizing.min_meta_chunk_bytes); - dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing.mpte_group_bytes); - dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing.dpte_group_bytes); + rq_sizing->min_meta_chunk_bytes); + dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing->mpte_group_bytes); + dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing->dpte_group_bytes); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param) +void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n"); dml_print( "DML_RQ_DLG_CALC: swath_width_ub = %0d\n", - rq_dlg_param.swath_width_ub); + rq_dlg_param->swath_width_ub); dml_print( "DML_RQ_DLG_CALC: swath_height = %0d\n", - rq_dlg_param.swath_height); + rq_dlg_param->swath_height); dml_print( "DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n", - rq_dlg_param.req_per_swath_ub); + rq_dlg_param->req_per_swath_ub); dml_print( "DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n", - rq_dlg_param.meta_pte_bytes_per_frame_ub); + rq_dlg_param->meta_pte_bytes_per_frame_ub); dml_print( "DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n", - rq_dlg_param.dpte_req_per_row_ub); + rq_dlg_param->dpte_req_per_row_ub); dml_print( "DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n", - rq_dlg_param.dpte_groups_per_row_ub); + rq_dlg_param->dpte_groups_per_row_ub); dml_print( "DML_RQ_DLG_CALC: dpte_row_height = %0d\n", - rq_dlg_param.dpte_row_height); + 
rq_dlg_param->dpte_row_height); dml_print( "DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n", - rq_dlg_param.dpte_bytes_per_row_ub); + rq_dlg_param->dpte_bytes_per_row_ub); dml_print( "DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n", - rq_dlg_param.meta_chunks_per_row_ub); + rq_dlg_param->meta_chunks_per_row_ub); dml_print( "DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n", - rq_dlg_param.meta_req_per_row_ub); + rq_dlg_param->meta_req_per_row_ub); dml_print( "DML_RQ_DLG_CALC: meta_row_height = %0d\n", - rq_dlg_param.meta_row_height); + rq_dlg_param->meta_row_height); dml_print( "DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n", - rq_dlg_param.meta_bytes_per_row_ub); + rq_dlg_param->meta_bytes_per_row_ub); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param) +void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n"); dml_print( "DML_RQ_DLG_CALC: full_swath_bytes = %0d\n", - rq_misc_param.full_swath_bytes); + rq_misc_param->full_swath_bytes); dml_print( "DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n", - rq_misc_param.stored_swath_bytes); - dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param.blk256_width); - dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param.blk256_height); - dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param.req_width); - dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param.req_height); + rq_misc_param->stored_swath_bytes); + dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param->blk256_width); + dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param->blk256_height); + dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param->req_width); + dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param->req_height); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param) +void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n"); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_l); + print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_c); + print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param) +void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n"); - dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param.t_mclk_wm_us); - dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param.t_urg_wm_us); - dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param.t_sr_wm_us); - 
dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param.t_extra_us); + dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param->t_mclk_wm_us); + dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param->t_urg_wm_us); + dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us); + dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us); dml_print( "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n", - dlg_sys_param.t_srx_delay_us); + dlg_sys_param->t_srx_delay_us); dml_print( "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n", - dlg_sys_param.deepsleep_dcfclk_mhz); + dlg_sys_param->deepsleep_dcfclk_mhz); dml_print( "DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n", - dlg_sys_param.total_flip_bw); + dlg_sys_param->total_flip_bw); dml_print( "DML_RQ_DLG_CALC: total_flip_bytes = %i\n", - dlg_sys_param.total_flip_bytes); + dlg_sys_param->total_flip_bytes); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st rq_regs) +void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n"); - dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs.chunk_size); - dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs.min_chunk_size); - dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs.meta_chunk_size); + dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs->chunk_size); + dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs->min_chunk_size); + dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs->meta_chunk_size); dml_print( "DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n", - rq_regs.min_meta_chunk_size); - dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs.dpte_group_size); - dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs.mpte_group_size); - dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs.swath_height); + rq_regs->min_meta_chunk_size); + dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs->dpte_group_size); + dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs->mpte_group_size); + dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs->swath_height); dml_print( "DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n", - rq_regs.pte_row_height_linear); + rq_regs->pte_row_height_linear); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs) +void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n"); dml_print("DML_RQ_DLG_CALC: <LUMA>\n"); - print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_l); + print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_l); dml_print("DML_RQ_DLG_CALC: <CHROMA>\n"); - print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_c); - dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs.drq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs.prq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs.mrq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", 
rq_regs.crq_expansion_mode); - dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs.plane1_base_address); + print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_c); + dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode); + dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs) +void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n"); dml_print( "DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n", - dlg_regs.refcyc_h_blank_end); + dlg_regs->refcyc_h_blank_end); dml_print( "DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n", - dlg_regs.dlg_vblank_end); + dlg_regs->dlg_vblank_end); dml_print( "DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n", - dlg_regs.min_dst_y_next_start); + dlg_regs->min_dst_y_next_start); dml_print( "DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n", - dlg_regs.refcyc_per_htotal); + dlg_regs->refcyc_per_htotal); dml_print( "DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n", - dlg_regs.refcyc_x_after_scaler); + dlg_regs->refcyc_x_after_scaler); dml_print( "DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n", - dlg_regs.dst_y_after_scaler); + dlg_regs->dst_y_after_scaler); dml_print( "DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n", - dlg_regs.dst_y_prefetch); + dlg_regs->dst_y_prefetch); dml_print( "DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n", - dlg_regs.dst_y_per_vm_vblank); + dlg_regs->dst_y_per_vm_vblank); dml_print( "DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n", - dlg_regs.dst_y_per_row_vblank); + dlg_regs->dst_y_per_row_vblank); dml_print( "DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n", - dlg_regs.dst_y_per_vm_flip); + dlg_regs->dst_y_per_vm_flip); dml_print( "DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n", - dlg_regs.dst_y_per_row_flip); + dlg_regs->dst_y_per_row_flip); dml_print( "DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n", - dlg_regs.ref_freq_to_pix_freq); + dlg_regs->ref_freq_to_pix_freq); dml_print( "DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n", - dlg_regs.vratio_prefetch); + dlg_regs->vratio_prefetch); dml_print( "DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n", - dlg_regs.vratio_prefetch_c); + dlg_regs->vratio_prefetch_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_vblank_l); + dlg_regs->refcyc_per_pte_group_vblank_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_vblank_c); + dlg_regs->refcyc_per_pte_group_vblank_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_vblank_l); + dlg_regs->refcyc_per_meta_chunk_vblank_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_vblank_c); + dlg_regs->refcyc_per_meta_chunk_vblank_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_flip_l); + 
dlg_regs->refcyc_per_pte_group_flip_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_flip_c); + dlg_regs->refcyc_per_pte_group_flip_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_flip_l); + dlg_regs->refcyc_per_meta_chunk_flip_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_flip_c); + dlg_regs->refcyc_per_meta_chunk_flip_c); dml_print( "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n", - dlg_regs.dst_y_per_pte_row_nom_l); + dlg_regs->dst_y_per_pte_row_nom_l); dml_print( "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n", - dlg_regs.dst_y_per_pte_row_nom_c); + dlg_regs->dst_y_per_pte_row_nom_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_nom_l); + dlg_regs->refcyc_per_pte_group_nom_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n", - dlg_regs.refcyc_per_pte_group_nom_c); + dlg_regs->refcyc_per_pte_group_nom_c); dml_print( "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n", - dlg_regs.dst_y_per_meta_row_nom_l); + dlg_regs->dst_y_per_meta_row_nom_l); dml_print( "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n", - dlg_regs.dst_y_per_meta_row_nom_c); + dlg_regs->dst_y_per_meta_row_nom_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_nom_l); + dlg_regs->refcyc_per_meta_chunk_nom_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n", - dlg_regs.refcyc_per_meta_chunk_nom_c); + dlg_regs->refcyc_per_meta_chunk_nom_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_pre_l); + dlg_regs->refcyc_per_line_delivery_pre_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_pre_c); + dlg_regs->refcyc_per_line_delivery_pre_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_l); + dlg_regs->refcyc_per_line_delivery_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n", - dlg_regs.refcyc_per_line_delivery_c); + dlg_regs->refcyc_per_line_delivery_c); dml_print( "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n", - dlg_regs.chunk_hdl_adjust_cur0); + dlg_regs->chunk_hdl_adjust_cur0); dml_print( "DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n", - dlg_regs.dst_y_offset_cur1); + dlg_regs->dst_y_offset_cur1); dml_print( "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n", - dlg_regs.chunk_hdl_adjust_cur1); + dlg_regs->chunk_hdl_adjust_cur1); dml_print( "DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n", - dlg_regs.vready_after_vcount0); + dlg_regs->vready_after_vcount0); dml_print( "DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n", - dlg_regs.dst_y_delta_drq_limit); + dlg_regs->dst_y_delta_drq_limit); dml_print( "DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n", - dlg_regs.xfc_reg_transfer_delay); + dlg_regs->xfc_reg_transfer_delay); dml_print( "DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n", - dlg_regs.xfc_reg_precharge_delay); + dlg_regs->xfc_reg_precharge_delay); dml_print( "DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n", - dlg_regs.xfc_reg_remote_surface_flip_latency); + dlg_regs->xfc_reg_remote_surface_flip_latency); dml_print( "DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n", - dlg_regs.refcyc_per_vm_dmdata); + dlg_regs->refcyc_per_vm_dmdata); 
dml_print("DML_RQ_DLG_CALC: =====================================\n"); } -void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs) +void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs) { dml_print("DML_RQ_DLG_CALC: =====================================\n"); dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n"); dml_print( "DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n", - ttu_regs.qos_level_low_wm); + ttu_regs->qos_level_low_wm); dml_print( "DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n", - ttu_regs.qos_level_high_wm); + ttu_regs->qos_level_high_wm); dml_print( "DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n", - ttu_regs.min_ttu_vblank); + ttu_regs->min_ttu_vblank); dml_print( "DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n", - ttu_regs.qos_level_flip); + ttu_regs->qos_level_flip); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_l); + ttu_regs->refcyc_per_req_delivery_pre_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_l); + ttu_regs->refcyc_per_req_delivery_l); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_c); + ttu_regs->refcyc_per_req_delivery_pre_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_c); + ttu_regs->refcyc_per_req_delivery_c); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_cur0); + ttu_regs->refcyc_per_req_delivery_cur0); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_cur0); + ttu_regs->refcyc_per_req_delivery_pre_cur0); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_cur1); + ttu_regs->refcyc_per_req_delivery_cur1); dml_print( "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n", - ttu_regs.refcyc_per_req_delivery_pre_cur1); + ttu_regs->refcyc_per_req_delivery_pre_cur1); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n", - ttu_regs.qos_level_fixed_l); + ttu_regs->qos_level_fixed_l); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n", - ttu_regs.qos_ramp_disable_l); + ttu_regs->qos_ramp_disable_l); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n", - ttu_regs.qos_level_fixed_c); + ttu_regs->qos_level_fixed_c); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n", - ttu_regs.qos_ramp_disable_c); + ttu_regs->qos_ramp_disable_c); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n", - ttu_regs.qos_level_fixed_cur0); + ttu_regs->qos_level_fixed_cur0); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n", - ttu_regs.qos_ramp_disable_cur0); + ttu_regs->qos_ramp_disable_cur0); dml_print( "DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n", - ttu_regs.qos_level_fixed_cur1); + ttu_regs->qos_level_fixed_cur1); dml_print( "DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n", - ttu_regs.qos_ramp_disable_cur1); + ttu_regs->qos_ramp_disable_cur1); dml_print("DML_RQ_DLG_CALC: =====================================\n"); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h index 2555ef0358c2..ebcd717744e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h @@ -31,16 +31,16 @@ /* 
Function: Printer functions * Print various struct */ -void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param); -void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing); -void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param); -void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param); -void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param); -void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param); +void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param); +void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing); +void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param); +void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param); +void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param); +void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param); -void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st data_rq_regs); -void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs); -void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs); -void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs); +void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs); +void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs); +void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs); +void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index 8f2b1684c231..59dc2c5b58dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -206,47 +206,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si static void extract_rq_sizing_regs( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_data_rq_regs_st *rq_regs, - const struct _vcs_dpi_display_data_rq_sizing_params_st rq_sizing) + const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing) { DTRACE("DLG: %s: rq_sizing param", __func__); print__data_rq_sizing_params_st(mode_lib, rq_sizing); - rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10; - if (rq_sizing.min_chunk_bytes == 0) + if (rq_sizing->min_chunk_bytes == 0) rq_regs->min_chunk_size = 0; else - rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1; - rq_regs->meta_chunk_size = 
dml_log2(rq_sizing.meta_chunk_bytes) - 10; - if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10; + if (rq_sizing->min_meta_chunk_bytes == 0) rq_regs->min_meta_chunk_size = 0; else - rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1; + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1; - rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; - rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; + rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6; } void dml1_extract_rq_regs( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_regs_st *rq_regs, - const struct _vcs_dpi_display_rq_params_st rq_param) + const struct _vcs_dpi_display_rq_params_st *rq_param) { unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; unsigned int detile_buf_plane1_addr = 0; - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); - if (rq_param.yuv420) - extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l); + if (rq_param->yuv420) + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c); - rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); - rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height); /* TODO: take the max between luma, chroma chunk size? 
* okay for now, as we are setting chunk_bytes to 8kb anyways */ - if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ + if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ rq_regs->drq_expansion_mode = 0; } else { rq_regs->drq_expansion_mode = 2; @@ -255,9 +255,9 @@ void dml1_extract_rq_regs( rq_regs->mrq_expansion_mode = 1; rq_regs->crq_expansion_mode = 1; - if (rq_param.yuv420) { - if ((double) rq_param.misc.rq_l.stored_swath_bytes - / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + if (rq_param->yuv420) { + if ((double) rq_param->misc.rq_l.stored_swath_bytes + / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) { detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */ } else { detile_buf_plane1_addr = dml_round_to_multiple( @@ -272,7 +272,7 @@ void dml1_extract_rq_regs( static void handle_det_buf_split( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_params_st *rq_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param) + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param) { unsigned int total_swath_bytes = 0; unsigned int swath_bytes_l = 0; @@ -281,8 +281,8 @@ static void handle_det_buf_split( unsigned int full_swath_bytes_packed_c = 0; bool req128_l = 0; bool req128_c = 0; - bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - bool surf_vert = (pipe_src_param.source_scan == dm_vert); + bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param->source_scan == dm_vert); unsigned int log2_swath_height_l = 0; unsigned int log2_swath_height_c = 0; unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; @@ -556,7 +556,7 @@ static void get_surf_rq_param( struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param, struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param, struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param, + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param, bool is_chroma) { bool mode_422 = 0; @@ -622,15 +622,15 @@ static void get_surf_rq_param( /* TODO check if ppe apply for both luma and chroma in 422 case */ if (is_chroma) { - vp_width = pipe_src_param.viewport_width_c / ppe; - vp_height = pipe_src_param.viewport_height_c; - data_pitch = pipe_src_param.data_pitch_c; - meta_pitch = pipe_src_param.meta_pitch_c; + vp_width = pipe_src_param->viewport_width_c / ppe; + vp_height = pipe_src_param->viewport_height_c; + data_pitch = pipe_src_param->data_pitch_c; + meta_pitch = pipe_src_param->meta_pitch_c; } else { - vp_width = pipe_src_param.viewport_width / ppe; - vp_height = pipe_src_param.viewport_height; - data_pitch = pipe_src_param.data_pitch; - meta_pitch = pipe_src_param.meta_pitch; + vp_width = pipe_src_param->viewport_width / ppe; + vp_height = pipe_src_param->viewport_height; + data_pitch = pipe_src_param->data_pitch; + meta_pitch = pipe_src_param->meta_pitch; } rq_sizing_param->chunk_bytes = 8192; @@ -645,11 +645,11 @@ static void get_surf_rq_param( rq_sizing_param->mpte_group_bytes = 2048; - surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); - surf_vert = (pipe_src_param.source_scan == dm_vert); + surf_linear = (pipe_src_param->sw_mode == dm_sw_linear); + surf_vert = (pipe_src_param->source_scan == dm_vert); bytes_per_element = get_bytes_per_element( - (enum source_format_class) pipe_src_param.source_format, + (enum source_format_class) 
pipe_src_param->source_format, is_chroma); log2_bytes_per_element = dml_log2(bytes_per_element); blk256_width = 0; @@ -671,7 +671,7 @@ static void get_surf_rq_param( log2_blk256_height = dml_log2((double) blk256_height); blk_bytes = surf_linear ? 256 : get_blk_size_bytes( - (enum source_macro_tile_size) pipe_src_param.macro_tile_size); + (enum source_macro_tile_size) pipe_src_param->macro_tile_size); log2_blk_bytes = dml_log2((double) blk_bytes); log2_blk_height = 0; log2_blk_width = 0; @@ -682,7 +682,7 @@ static void get_surf_rq_param( * "/2" is like square root * blk is vertical biased */ - if (pipe_src_param.sw_mode != dm_sw_linear) + if (pipe_src_param->sw_mode != dm_sw_linear) log2_blk_height = log2_blk256_height + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1); else @@ -930,10 +930,10 @@ static void get_surf_rq_param( &func_meta_row_height, vp_width, data_pitch, - pipe_src_param.source_format, - pipe_src_param.sw_mode, - pipe_src_param.macro_tile_size, - pipe_src_param.source_scan, + pipe_src_param->source_format, + pipe_src_param->sw_mode, + pipe_src_param->macro_tile_size, + pipe_src_param->source_scan, is_chroma); /* Just a check to make sure this function and the new one give the same @@ -960,12 +960,12 @@ static void get_surf_rq_param( void dml1_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_params_st *rq_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param) + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param) { /* get param for luma surface */ - rq_param->yuv420 = pipe_src_param.source_format == dm_420_8 - || pipe_src_param.source_format == dm_420_10; - rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10; + rq_param->yuv420 = pipe_src_param->source_format == dm_420_8 + || pipe_src_param->source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10; get_surf_rq_param( mode_lib, @@ -975,7 +975,7 @@ void dml1_rq_dlg_get_rq_params( pipe_src_param, 0); - if (is_dual_plane((enum source_format_class) pipe_src_param.source_format)) { + if (is_dual_plane((enum source_format_class) pipe_src_param->source_format)) { /* get param for chroma surface */ get_surf_rq_param( mode_lib, @@ -988,7 +988,7 @@ void dml1_rq_dlg_get_rq_params( /* calculate how to split the det buffer space between luma and chroma */ handle_det_buf_split(mode_lib, rq_param, pipe_src_param); - print__rq_params_st(mode_lib, *rq_param); + print__rq_params_st(mode_lib, rq_param); } /* Note: currently taken in as is. 
@@ -998,26 +998,26 @@ void dml1_rq_dlg_get_dlg_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs, struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs, - const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param, - const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param, - const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param, + const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param, + const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param, + const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param, const bool cstate_en, const bool pstate_en, const bool vm_en, const bool iflip_en) { /* Timing */ - unsigned int htotal = e2e_pipe_param.pipe.dest.htotal; - unsigned int hblank_end = e2e_pipe_param.pipe.dest.hblank_end; - unsigned int vblank_start = e2e_pipe_param.pipe.dest.vblank_start; - unsigned int vblank_end = e2e_pipe_param.pipe.dest.vblank_end; - bool interlaced = e2e_pipe_param.pipe.dest.interlaced; + unsigned int htotal = e2e_pipe_param->pipe.dest.htotal; + unsigned int hblank_end = e2e_pipe_param->pipe.dest.hblank_end; + unsigned int vblank_start = e2e_pipe_param->pipe.dest.vblank_start; + unsigned int vblank_end = e2e_pipe_param->pipe.dest.vblank_end; + bool interlaced = e2e_pipe_param->pipe.dest.interlaced; unsigned int min_vblank = mode_lib->ip.min_vblank_lines; - double pclk_freq_in_mhz = e2e_pipe_param.pipe.dest.pixel_rate_mhz; - double refclk_freq_in_mhz = e2e_pipe_param.clks_cfg.refclk_mhz; - double dppclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dppclk_mhz; - double dispclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dispclk_mhz; + double pclk_freq_in_mhz = e2e_pipe_param->pipe.dest.pixel_rate_mhz; + double refclk_freq_in_mhz = e2e_pipe_param->clks_cfg.refclk_mhz; + double dppclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dppclk_mhz; + double dispclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dispclk_mhz; double ref_freq_to_pix_freq; double prefetch_xy_calc_in_dcfclk; @@ -1160,13 +1160,13 @@ void dml1_rq_dlg_get_dlg_params( disp_dlg_regs->dlg_vblank_end = interlaced ? 
(vblank_end / 2) : vblank_end; /* 15 bits */ prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */ - min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz; t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz; - min_ttu_vblank = dlg_sys_param.t_urg_wm_us; + min_ttu_vblank = dlg_sys_param->t_urg_wm_us; if (cstate_en) - min_ttu_vblank = dml_max(dlg_sys_param.t_sr_wm_us, min_ttu_vblank); + min_ttu_vblank = dml_max(dlg_sys_param->t_sr_wm_us, min_ttu_vblank); if (pstate_en) - min_ttu_vblank = dml_max(dlg_sys_param.t_mclk_wm_us, min_ttu_vblank); + min_ttu_vblank = dml_max(dlg_sys_param->t_mclk_wm_us, min_ttu_vblank); min_ttu_vblank = min_ttu_vblank + t_calc_us; min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal; @@ -1197,59 +1197,59 @@ void dml1_rq_dlg_get_dlg_params( /* ------------------------- */ /* Prefetch Calc */ /* Source */ - dcc_en = e2e_pipe_param.pipe.src.dcc; + dcc_en = e2e_pipe_param->pipe.src.dcc; dual_plane = is_dual_plane( - (enum source_format_class) e2e_pipe_param.pipe.src.source_format); + (enum source_format_class) e2e_pipe_param->pipe.src.source_format); mode_422 = 0; /* TODO */ - access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ + access_dir = (e2e_pipe_param->pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ bytes_per_element_l = get_bytes_per_element( - (enum source_format_class) e2e_pipe_param.pipe.src.source_format, + (enum source_format_class) e2e_pipe_param->pipe.src.source_format, 0); bytes_per_element_c = get_bytes_per_element( - (enum source_format_class) e2e_pipe_param.pipe.src.source_format, + (enum source_format_class) e2e_pipe_param->pipe.src.source_format, 1); - vp_height_l = e2e_pipe_param.pipe.src.viewport_height; - vp_width_l = e2e_pipe_param.pipe.src.viewport_width; - vp_height_c = e2e_pipe_param.pipe.src.viewport_height_c; - vp_width_c = e2e_pipe_param.pipe.src.viewport_width_c; + vp_height_l = e2e_pipe_param->pipe.src.viewport_height; + vp_width_l = e2e_pipe_param->pipe.src.viewport_width; + vp_height_c = e2e_pipe_param->pipe.src.viewport_height_c; + vp_width_c = e2e_pipe_param->pipe.src.viewport_width_c; /* Scaling */ - htaps_l = e2e_pipe_param.pipe.scale_taps.htaps; - htaps_c = e2e_pipe_param.pipe.scale_taps.htaps_c; - hratios_l = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio; - hratios_c = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio_c; - vratio_l = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio; - vratio_c = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio_c; + htaps_l = e2e_pipe_param->pipe.scale_taps.htaps; + htaps_c = e2e_pipe_param->pipe.scale_taps.htaps_c; + hratios_l = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio; + hratios_c = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio_c; + vratio_l = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio; + vratio_c = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio_c; line_time_in_us = (htotal / pclk_freq_in_mhz); - vinit_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit; - vinit_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_c; - vinit_bot_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot; - vinit_bot_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot_c; - - swath_height_l = rq_dlg_param.rq_l.swath_height; - swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; - dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_l = 
rq_dlg_param.rq_l.dpte_groups_per_row_ub; - meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub; - meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub; - - swath_height_c = rq_dlg_param.rq_c.swath_height; - swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; - dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub; - dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; - - meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; - vupdate_offset = e2e_pipe_param.pipe.dest.vupdate_offset; - vupdate_width = e2e_pipe_param.pipe.dest.vupdate_width; - vready_offset = e2e_pipe_param.pipe.dest.vready_offset; + vinit_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit; + vinit_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_c; + vinit_bot_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot; + vinit_bot_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot_c; + + swath_height_l = rq_dlg_param->rq_l.swath_height; + swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub; + dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub; + meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub; + meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub; + + swath_height_c = rq_dlg_param->rq_c.swath_height; + swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub; + dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub; + + meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub; + vupdate_offset = e2e_pipe_param->pipe.dest.vupdate_offset; + vupdate_width = e2e_pipe_param->pipe.dest.vupdate_width; + vready_offset = e2e_pipe_param->pipe.dest.vready_offset; dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal; dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal; pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz; - vstartup_start = e2e_pipe_param.pipe.dest.vstartup_start; + vstartup_start = e2e_pipe_param->pipe.dest.vstartup_start; if (interlaced) vstartup_start = vstartup_start / 2; @@ -1276,13 +1276,13 @@ void dml1_rq_dlg_get_dlg_params( dst_x_after_scaler = 0; dst_y_after_scaler = 0; - if (e2e_pipe_param.pipe.src.is_hsplit) + if (e2e_pipe_param->pipe.src.is_hsplit) dst_x_after_scaler = pixel_rate_delay_subtotal - + e2e_pipe_param.pipe.dest.recout_width; + + e2e_pipe_param->pipe.dest.recout_width; else dst_x_after_scaler = pixel_rate_delay_subtotal; - if (e2e_pipe_param.dout.output_format == dm_420) + if (e2e_pipe_param->dout.output_format == dm_420) dst_y_after_scaler = 1; else dst_y_after_scaler = 0; @@ -1334,7 +1334,7 @@ void dml1_rq_dlg_get_dlg_params( DTRACE( "DLG: %s: t_srx_delay_us = %3.2f", __func__, - (double) dlg_sys_param.t_srx_delay_us); + (double) dlg_sys_param->t_srx_delay_us); DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us); DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset); DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width); @@ -1408,12 +1408,12 @@ void dml1_rq_dlg_get_dlg_params( DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes); prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us; - flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param.total_flip_bw) - / (double) 
dlg_sys_param.total_flip_bytes;
+	flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param->total_flip_bw)
+			/ (double) dlg_sys_param->total_flip_bytes;
 	t_vm_us = line_time_in_us / 4.0;
 	if (vm_en && dcc_en) {
 		t_vm_us = dml_max(
-				dlg_sys_param.t_extra_us,
+				dlg_sys_param->t_extra_us,
 				dml_max((double) vm_bytes / prefetch_bw, t_vm_us));
 
 		if (iflip_en && !dual_plane) {
@@ -1423,12 +1423,12 @@ void dml1_rq_dlg_get_dlg_params(
 		}
 	}
 
-	t_r0_us = dml_max(dlg_sys_param.t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
+	t_r0_us = dml_max(dlg_sys_param->t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
 
 	if (vm_en || dcc_en) {
 		t_r0_us = dml_max(
 				(double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw,
-				dlg_sys_param.t_extra_us);
+				dlg_sys_param->t_extra_us);
 		t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us);
 
 		if (iflip_en && !dual_plane) {
@@ -1550,15 +1550,15 @@ void dml1_rq_dlg_get_dlg_params(
 			disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
 
 	/* Active */
-	req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
-	req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
-	meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+	req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+	req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+	meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
 
 	swath_width_pixels_ub_l = 0;
 	swath_width_pixels_ub_c = 0;
 	scaler_rec_in_width_l = 0;
 	scaler_rec_in_width_c = 0;
-	dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
-	dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+	dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+	dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
 
 	disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
 			/ (double) vratio_l * dml_pow(2, 2));
@@ -1650,14 +1650,14 @@ void dml1_rq_dlg_get_dlg_params(
 	refcyc_per_req_delivery_cur0 = 0.;
 
 	full_recout_width = 0;
-	if (e2e_pipe_param.pipe.src.is_hsplit) {
-		if (e2e_pipe_param.pipe.dest.full_recout_width == 0) {
+	if (e2e_pipe_param->pipe.src.is_hsplit) {
+		if (e2e_pipe_param->pipe.dest.full_recout_width == 0) {
 			DTRACE("DLG: %s: Warning: full_recout_width not set in hsplit mode", __func__);
-			full_recout_width = e2e_pipe_param.pipe.dest.recout_width * 2; /* assume half split for dcn1 */
+			full_recout_width = e2e_pipe_param->pipe.dest.recout_width * 2; /* assume half split for dcn1 */
 		} else
-			full_recout_width = e2e_pipe_param.pipe.dest.full_recout_width;
+			full_recout_width = e2e_pipe_param->pipe.dest.full_recout_width;
 	} else
-		full_recout_width = e2e_pipe_param.pipe.dest.recout_width;
+		full_recout_width = e2e_pipe_param->pipe.dest.recout_width;
 
 	refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
 			mode_lib,
@@ -1824,9 +1824,9 @@ void dml1_rq_dlg_get_dlg_params(
 	}
 
 	/* TTU - Cursor */
-	hratios_cur0 = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
-	cur0_src_width = e2e_pipe_param.pipe.src.cur0_src_width; /* cursor source width */
-	cur0_bpp = (enum cursor_bpp) e2e_pipe_param.pipe.src.cur0_bpp;
+	hratios_cur0 = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio;
+	cur0_src_width = e2e_pipe_param->pipe.src.cur0_src_width; /* cursor source width */
+	cur0_bpp = (enum cursor_bpp) e2e_pipe_param->pipe.src.cur0_bpp;
 	cur0_req_size = 0;
 	cur0_req_width = 0;
 	cur0_width_ub = 0.0;
@@ -1927,6 +1927,6 @@ void dml1_rq_dlg_get_dlg_params(
 	disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
 	ASSERT(disp_ttu_regs->min_ttu_vblank <
dml_pow(2, 24)); - print__ttu_regs_st(mode_lib, *disp_ttu_regs); - print__dlg_regs_st(mode_lib, *disp_dlg_regs); + print__ttu_regs_st(mode_lib, disp_ttu_regs); + print__dlg_regs_st(mode_lib, disp_dlg_regs); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h index 9c06913ad767..e19ee3bde45f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h @@ -33,7 +33,7 @@ struct display_mode_lib; void dml1_extract_rq_regs( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_regs_st *rq_regs, - const struct _vcs_dpi_display_rq_params_st rq_param); + const struct _vcs_dpi_display_rq_params_st *rq_param); /* Function: dml_rq_dlg_get_rq_params * Calculate requestor related parameters that register definition agnostic * (i.e. this layer does try to separate real values from register definition) @@ -45,7 +45,7 @@ void dml1_extract_rq_regs( void dml1_rq_dlg_get_rq_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_rq_params_st *rq_param, - const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param); + const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param); /* Function: dml_rq_dlg_get_dlg_params @@ -55,9 +55,9 @@ void dml1_rq_dlg_get_dlg_params( struct display_mode_lib *mode_lib, struct _vcs_dpi_display_dlg_regs_st *dlg_regs, struct _vcs_dpi_display_ttu_regs_st *ttu_regs, - const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param, - const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param, - const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param, + const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param, + const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param, + const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param, const bool cstate_en, const bool pstate_en, const bool vm_en, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c new file mode 100644 index 000000000000..ece34b0b8a46 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -0,0 +1,1889 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dml_wrapper.h" +#include "resource.h" +#include "core_types.h" +#include "dsc.h" +#include "clk_mgr.h" + +#ifndef DC_LOGGER_INIT +#define DC_LOGGER_INIT +#undef DC_LOG_WARNING +#define DC_LOG_WARNING +#endif + +#define DML_WRAPPER_TRANSLATION_ +#include "dml_wrapper_translation.c" +#undef DML_WRAPPER_TRANSLATION_ + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static void build_clamping_params(struct dc_stream_state *stream) +{ + stream->clamping.clamping_level = CLAMPING_FULL_RANGE; + stream->clamping.c_depth = stream->timing.display_color_depth; + stream->clamping.pixel_encoding = stream->timing.pixel_encoding; +} + +static void get_pixel_clock_parameters( + const struct pipe_ctx *pipe_ctx, + struct pixel_clk_params *pixel_clk_params) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + + /*TODO: is this halved for YCbCr 420? in that case we might want to move + * the pixel clock normalization for hdmi up to here instead of doing it + * in pll_adjust_pix_clk + */ + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + pixel_clk_params->encoder_object_id = stream->link->link_enc->id; + pixel_clk_params->signal_type = pipe_ctx->stream->signal; + pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; + /* TODO: un-hardcode*/ + pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * + LINK_RATE_REF_FREQ_IN_KHZ; + pixel_clk_params->flags.ENABLE_SS = 0; + pixel_clk_params->color_depth = + stream->timing.display_color_depth; + pixel_clk_params->flags.DISPLAY_BLANKED = 1; + pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == + PIXEL_ENCODING_YCBCR420); + pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { + pixel_clk_params->color_depth = COLOR_DEPTH_888; + } + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; + } + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk_100hz *= 2; + +} + +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ + get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); + + if (pipe_ctx->clock_source) + pipe_ctx->clock_source->funcs->get_pix_clk_dividers( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + &pipe_ctx->pll_settings); + + pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; + + resource_build_bit_depth_reduction_params(pipe_ctx->stream, + &pipe_ctx->stream->bit_depth_params); + build_clamping_params(pipe_ctx->stream); + + return DC_OK; +} + +static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, + struct bit_depth_reduction_params *fmt_bit_depth) +{ + enum dc_dither_option option = stream->dither_option; + enum dc_pixel_encoding pixel_encoding = + stream->timing.pixel_encoding; + + memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth)); + + if (option == DITHER_OPTION_DEFAULT) { + switch (stream->timing.display_color_depth) { + case COLOR_DEPTH_666: + option = DITHER_OPTION_SPATIAL6; + break; + case COLOR_DEPTH_888: + option = DITHER_OPTION_SPATIAL8; + break; + case COLOR_DEPTH_101010: + option = DITHER_OPTION_SPATIAL10; + break; + default: + option = 
DITHER_OPTION_DISABLE; + } + } + + if (option == DITHER_OPTION_DISABLE) + return; + + if (option == DITHER_OPTION_TRUN6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 0; + } else if (option == DITHER_OPTION_TRUN8 || + option == DITHER_OPTION_TRUN8_SPATIAL6 || + option == DITHER_OPTION_TRUN8_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 1; + } else if (option == DITHER_OPTION_TRUN10 || + option == DITHER_OPTION_TRUN10_SPATIAL6 || + option == DITHER_OPTION_TRUN10_SPATIAL8 || + option == DITHER_OPTION_TRUN10_FM8 || + option == DITHER_OPTION_TRUN10_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + } + + /* special case - Formatter can only reduce by 4 bits at most. + * When reducing from 12 to 6 bits, + * HW recommends we use trunc with round mode + * (if we did nothing, trunc to 10 bits would be used) + * note that any 12->10 bit reduction is ignored prior to DCE8, + * as the input was 10 bits. + */ + if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + fmt_bit_depth->flags.TRUNCATE_MODE = 1; + } + + /* spatial dither + * note that spatial modes 1-3 are never used + */ + if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_TRUN10_SPATIAL6 || + option == DITHER_OPTION_TRUN8_SPATIAL6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; + } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL8 || + option == DITHER_OPTION_SPATIAL8_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; + } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL10 || + option == DITHER_OPTION_SPATIAL10_FM8 || + option == DITHER_OPTION_SPATIAL10_FM6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 
1 : 0; + } + + if (option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_SPATIAL8 || + option == DITHER_OPTION_SPATIAL10) { + fmt_bit_depth->flags.FRAME_RANDOM = 0; + } else { + fmt_bit_depth->flags.FRAME_RANDOM = 1; + } + + ////////////////////// + //// temporal dither + ////////////////////// + if (option == DITHER_OPTION_FM6 || + option == DITHER_OPTION_SPATIAL8_FM6 || + option == DITHER_OPTION_SPATIAL10_FM6 || + option == DITHER_OPTION_TRUN10_FM6 || + option == DITHER_OPTION_TRUN8_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0; + } else if (option == DITHER_OPTION_FM8 || + option == DITHER_OPTION_SPATIAL10_FM8 || + option == DITHER_OPTION_TRUN10_FM8) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1; + } else if (option == DITHER_OPTION_FM10) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2; + } + + fmt_bit_depth->pixel_encoding = pixel_encoding; +} + +bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +{ + int i; + + /* Validate DSC config, dsc count validation is already done */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dsc_config dsc_cfg; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + /* Only need to validate top pipe */ + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) + continue; + + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false;
+		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+		if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
+			return false;
+	}
+	return true;
+}
+
+enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
+{
+	enum dc_status status = DC_OK;
+	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+	if (!pipe_ctx)
+		return DC_ERROR_UNEXPECTED;
+
+
+	status = build_pipe_hw_param(pipe_ctx);
+
+	return status;
+}
+
+void dml_acquire_dsc(const struct dc *dc,
+			struct resource_context *res_ctx,
+			struct display_stream_compressor **dsc,
+			int pipe_idx)
+{
+	int i;
+	const struct resource_pool *pool = dc->res_pool;
+	struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
+
+	ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
+	*dsc = NULL;
+
+	/* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
+	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+		*dsc = pool->dscs[pipe_idx];
+		res_ctx->is_dsc_acquired[pipe_idx] = true;
+		return;
+	}
+
+	/* Return the old DSC to avoid having to redo it */
+	if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+		*dsc = dsc_old;
+		res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+		return;
+	}
+
+	/* Find the first free DSC */
+	for (i = 0; i < pool->res_cap->num_dsc; i++)
+		if (!res_ctx->is_dsc_acquired[i]) {
+			*dsc = pool->dscs[i];
+			res_ctx->is_dsc_acquired[i] = true;
+			break;
+		}
+}
+
+static bool dml_split_stream_for_mpc_or_odm(
+		const struct dc *dc,
+		struct resource_context *res_ctx,
+		struct pipe_ctx *pri_pipe,
+		struct pipe_ctx *sec_pipe,
+		bool odm)
+{
+	int pipe_idx = sec_pipe->pipe_idx;
+	const struct resource_pool *pool = dc->res_pool;
+
+	*sec_pipe = *pri_pipe;
+
+	sec_pipe->pipe_idx = pipe_idx;
+	sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+	sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+	sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+	sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+	sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+	sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+	sec_pipe->stream_res.dsc = NULL;
+	if (odm) {
+		if (pri_pipe->next_odm_pipe) {
+			ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
+			sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
+			sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
+		}
+		if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
+			pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
+			sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
+		}
+		if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
+			pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
+			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
+		}
+		pri_pipe->next_odm_pipe = sec_pipe;
+		sec_pipe->prev_odm_pipe = pri_pipe;
+		ASSERT(sec_pipe->top_pipe == NULL);
+
+		if (!sec_pipe->top_pipe)
+			sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+		else
+			sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+		if (sec_pipe->stream->timing.flags.DSC == 1) {
+			dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+			ASSERT(sec_pipe->stream_res.dsc);
+			if (sec_pipe->stream_res.dsc == NULL)
+				return false;
+		}
+	} else {
+		if (pri_pipe->bottom_pipe) {
+			ASSERT(pri_pipe->bottom_pipe != sec_pipe);
+			sec_pipe->bottom_pipe =
pri_pipe->bottom_pipe; + sec_pipe->bottom_pipe->top_pipe = sec_pipe; + } + pri_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = pri_pipe; + + ASSERT(pri_pipe->plane_state); + } + + return true; +} + +static struct pipe_ctx *dml_find_split_pipe( + struct dc *dc, + struct dc_state *context, + int old_index) +{ + struct pipe_ctx *pipe = NULL; + int i; + + if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[old_index]; + pipe->pipe_idx = old_index; + } + + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + } + + /* + * May need to fix pipes getting tossed from 1 opp to another on flip + * Add for debugging transient underflow during topology updates: + * ASSERT(pipe); + */ + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + + return pipe; +} + +static void dml_release_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct display_stream_compressor **dsc) +{ + int i; + + for (i = 0; i < pool->res_cap->num_dsc; i++) + if (pool->dscs[i] == *dsc) { + res_ctx->is_dsc_acquired[i] = false; + *dsc = NULL; + break; + } +} + +static int dml_get_num_mpc_splits(struct pipe_ctx *pipe) +{ + int mpc_split_count = 0; + struct pipe_ctx *other_pipe = pipe->bottom_pipe; + + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->bottom_pipe; + } + other_pipe = pipe->top_pipe; + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->top_pipe; + } + + return mpc_split_count; +} + +static bool dml_enough_pipes_for_subvp(struct dc *dc, + struct dc_state *context) +{ + int i = 0; + int num_pipes = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + num_pipes++; + } + + // Sub-VP only possible if the number of "real" pipes is + // less than or equal to half the number of available pipes + if (num_pipes * 2 > dc->res_pool->pipe_count) + return false; + + return true; +} + +static int dml_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + int *split, + bool *merge) +{ + int i, pipe_idx, vlevel_split; + int plane_count = 0; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; + struct vba_vars_st *v = &context->bw_ctx.dml.vba; + int max_mpc_comb = v->maxMpcComb; + + if (context->stream_count > 1) { + if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) + avoid_split = true; + } else if (dc->debug.force_single_disp_pipe_split) + force_split = true; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. 
+		 */
+		if (pipe->plane_state &&
+				(pipe->plane_state->dst_rect.width <= 16 ||
+					pipe->plane_state->dst_rect.height <= 16 ||
+					pipe->plane_state->src_rect.width <= 16 ||
+					pipe->plane_state->src_rect.height <= 16))
+			avoid_split = true;
+
+		/* TODO: fix dc bugs and remove this split threshold thing */
+		if (pipe->stream && !pipe->prev_odm_pipe &&
+				(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+			++plane_count;
+	}
+	if (plane_count > dc->res_pool->pipe_count / 2)
+		avoid_split = true;
+
+	/* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		struct dc_crtc_timing timing;
+
+		if (!pipe->stream)
+			continue;
+		else {
+			timing = pipe->stream->timing;
+			if (timing.h_border_left + timing.h_border_right
+					+ timing.v_border_top + timing.v_border_bottom > 0) {
+				avoid_split = true;
+				break;
+			}
+		}
+	}
+
+	/* The avoid-split loop looks for the lowest voltage level that allows the most unsplit pipes possible */
+	if (avoid_split) {
+		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+			if (!context->res_ctx.pipe_ctx[i].stream)
+				continue;
+
+			for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+				if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+						v->ModeSupport[vlevel][0])
+					break;
+			/* Impossible to not split this pipe */
+			if (vlevel > context->bw_ctx.dml.soc.num_states)
+				vlevel = vlevel_split;
+			else
+				max_mpc_comb = 0;
+			pipe_idx++;
+		}
+		v->maxMpcComb = max_mpc_comb;
+	}
+
+	/* The split loop sets which pipes should be split, based on dml outputs and dc flags */
+	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+		int pipe_plane = v->pipe_plane[pipe_idx];
+		bool split4mpc = context->stream_count == 1 && plane_count == 1
+				&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
+
+		if (!context->res_ctx.pipe_ctx[i].stream)
+			continue;
+
+		if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
+			split[i] = 4;
+		else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
+			split[i] = 2;
+
+		if ((pipe->stream->view_format ==
+					VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+				pipe->stream->view_format ==
+					VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+				(pipe->stream->timing.timing_3d_format ==
+					TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+				pipe->stream->timing.timing_3d_format ==
+					TIMING_3D_FORMAT_SIDE_BY_SIDE))
+			split[i] = 2;
+		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+			split[i] = 2;
+			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+		}
+		if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+			split[i] = 4;
+			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+		}
+		/*420 format workaround*/
+		if (pipe->stream->timing.h_addressable > 7680 &&
+				pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+			split[i] = 4;
+		}
+
+		v->ODMCombineEnabled[pipe_plane] =
+			v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+		if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+			if (dml_get_num_mpc_splits(pipe) == 1) {
+				/*If need split for mpc but 2 way split already*/
+				if (split[i] == 4)
+					split[i] = 2; /* 2 -> 4 MPC */
+				else if (split[i] == 2)
+					split[i] = 0; /* 2 -> 2 MPC */
+				else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+					merge[i] = true; /* 2 -> 1 MPC */
+			} else if
(dml_get_num_mpc_splits(pipe) == 3) { + /*If need split for mpc but 4 way split already*/ + if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) + || !pipe->bottom_pipe)) { + merge[i] = true; /* 4 -> 2 MPC */ + } else if (split[i] == 0 && pipe->top_pipe && + pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 4 -> 1 MPC */ + split[i] = 0; + } else if (dml_get_num_mpc_splits(pipe)) { + /* ODM -> MPC transition */ + if (pipe->prev_odm_pipe) { + split[i] = 0; + merge[i] = true; + } + } + } else { + if (dml_get_num_mpc_splits(pipe) == 1) { + /*If need split for odm but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 ODM */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 ODM */ + else if (pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + } else if (dml_get_num_mpc_splits(pipe) == 3) { + /*If need split for odm but 4 way split already*/ + if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) + || !pipe->next_odm_pipe)) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* 4 -> 2 ODM */ + } else if (split[i] == 0 && pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + split[i] = 0; + } else if (dml_get_num_mpc_splits(pipe)) { + /* MPC -> ODM transition */ + ASSERT(0); /* NOT expected yet */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + split[i] = 0; + merge[i] = true; + } + } + } + + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) + v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2; + pipe_idx++; + } + + return vlevel; +} + +static void dml_set_phantom_stream_timing(struct dc *dc, + struct dc_state *context, + struct pipe_ctx *ref_pipe, + struct dc_stream_state *phantom_stream) +{ + // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width + uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 + + dc->caps.subvp_fw_processing_delay_us + + dc->caps.subvp_pstate_allow_width_us; + uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) * + (ref_pipe->stream->timing.pix_clk_100hz * 100) / + (double)ref_pipe->stream->timing.h_total; + uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start; + + phantom_stream->dst.y = 0; + phantom_stream->dst.height = phantom_vactive; + phantom_stream->src.y = 0; + phantom_stream->src.height = phantom_vactive; + + phantom_stream->timing.v_addressable = phantom_vactive; + phantom_stream->timing.v_front_porch = 1; + phantom_stream->timing.v_total = phantom_stream->timing.v_addressable + + phantom_stream->timing.v_front_porch + + phantom_stream->timing.v_sync_width + + phantom_bp; +} + +static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc, + struct dc_state *context, + struct pipe_ctx *ref_pipe) +{ + struct dc_stream_state *phantom_stream = NULL; + + phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); + phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; + phantom_stream->dpms_off = true; + phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; + phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; + ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; + ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + + /* stream has limited viewport and small timing */ + memcpy(&phantom_stream->timing, 
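The phantom vactive computed in dml_set_phantom_stream_timing() above is a microsecond budget converted to scanlines through the pixel clock and horizontal total. A standalone restatement with invented example values (none of these numbers come from the patch): a 100us budget, say 10us DRAM latency + 60us margin + 10us firmware delay + 20us p-state allow width, at a 594MHz pixel clock with h_total 4400 gives 0.0001 * 594000000 / 4400 = 13 lines.

	#include <stdint.h>

	/* scanlines = seconds * pixel_clock / h_total */
	static uint32_t phantom_vactive_lines(uint32_t budget_us,
					      uint64_t pix_clk_hz,
					      uint32_t h_total)
	{
		return (uint32_t)(((double)budget_us / 1000000.0) *
				  pix_clk_hz / h_total);
	}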
&ref_pipe->stream->timing, sizeof(phantom_stream->timing)); + memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src)); + memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); + dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream); + + dc_add_stream_to_ctx(dc, context, phantom_stream); + dc->hwss.apply_ctx_to_hw(dc, context); + return phantom_stream; +} + +static void dml_enable_phantom_plane(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *phantom_stream, + struct pipe_ctx *main_pipe) +{ + struct dc_plane_state *phantom_plane = NULL; + struct dc_plane_state *prev_phantom_plane = NULL; + struct pipe_ctx *curr_pipe = main_pipe; + + while (curr_pipe) { + if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) + phantom_plane = prev_phantom_plane; + else + phantom_plane = dc_create_plane_state(dc); + + memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); + memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, + sizeof(phantom_plane->scaling_quality)); + memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect)); + memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect)); + memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect)); + memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size, + sizeof(phantom_plane->plane_size)); + memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info, + sizeof(phantom_plane->tiling_info)); + memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc)); + /* Currently compat_level is undefined in dc_plane_state + * phantom_plane->compat_level = curr_pipe->plane_state->compat_level; + */ + phantom_plane->format = curr_pipe->plane_state->format; + phantom_plane->rotation = curr_pipe->plane_state->rotation; + phantom_plane->visible = curr_pipe->plane_state->visible; + + /* Shadow pipe has small viewport.
*/ + phantom_plane->clip_rect.y = 0; + phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; + + dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); + + curr_pipe = curr_pipe->bottom_pipe; + prev_phantom_plane = phantom_plane; + } +} + +static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context) +{ + int i = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct dc_stream_state *ref_stream = pipe->stream; + // Only construct phantom stream for top pipes that have plane enabled + if (!pipe->top_pipe && pipe->plane_state && pipe->stream && + pipe->stream->mall_stream_config.type == SUBVP_NONE) { + struct dc_stream_state *phantom_stream = NULL; + + phantom_stream = dml_enable_phantom_stream(dc, context, pipe); + dml_enable_phantom_plane(dc, context, phantom_stream, pipe); + } + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && pipe->stream && + pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + pipe->stream->use_dynamic_meta = false; + pipe->plane_state->flip_immediate = false; + if (!resource_build_scaling_params(pipe)) { + // Log / remove phantom pipes since failed to build scaling params + } + } + } +} + +static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context) +{ + int i; + bool removed_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + // Remove phantom streams and their planes from the context + if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + dc_rem_all_planes_for_stream(dc, pipe->stream, context); + dc_remove_stream_from_ctx(dc, context, pipe->stream); + removed_pipe = true; + } + + // Clear all phantom stream info + if (pipe->stream) { + pipe->stream->mall_stream_config.type = SUBVP_NONE; + pipe->stream->mall_stream_config.paired_stream = NULL; + } + } + if (removed_pipe) + dc->hwss.apply_ctx_to_hw(dc, context); +} + +/* + * If the input state contains no upstream planes for a particular pipe (i.e.
only timing) + * we need to populate some "conservative" plane information as DML cannot handle "no planes" + */ +static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe) +{ + pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled; + pipe->src.source_scan = dm_horz; + pipe->src.sw_mode = dm_sw_4kb_s; + pipe->src.macro_tile_size = dm_64k_tile; + pipe->src.viewport_width = timing->h_addressable; + if (pipe->src.viewport_width > 1920) + pipe->src.viewport_width = 1920; + pipe->src.viewport_height = timing->v_addressable; + if (pipe->src.viewport_height > 1080) + pipe->src.viewport_height = 1080; + pipe->src.surface_height_y = pipe->src.viewport_height; + pipe->src.surface_width_y = pipe->src.viewport_width; + pipe->src.surface_height_c = pipe->src.viewport_height; + pipe->src.surface_width_c = pipe->src.viewport_width; + pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256; + pipe->src.source_format = dm_444_32; + pipe->dest.recout_width = pipe->src.viewport_width; + pipe->dest.recout_height = pipe->src.viewport_height; + pipe->dest.full_recout_width = pipe->dest.recout_width; + pipe->dest.full_recout_height = pipe->dest.recout_height; + pipe->scale_ratio_depth.lb_depth = dm_lb_16; + pipe->scale_ratio_depth.hscl_ratio = 1.0; + pipe->scale_ratio_depth.vscl_ratio = 1.0; + pipe->scale_ratio_depth.scl_enable = 0; + pipe->scale_taps.htaps = 1; + pipe->scale_taps.vtaps = 1; + pipe->dest.vtotal_min = timing->v_total; + pipe->dest.vtotal_max = timing->v_total; + + if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) { + pipe->src.viewport_width /= 2; + pipe->dest.recout_width /= 2; + } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) { + pipe->src.viewport_width /= 4; + pipe->dest.recout_width /= 4; + } + + pipe->src.dcc = false; + pipe->src.dcc_rate = 1; +} + +/* + * If the pipe is not blending (i.e. 
pipe_ctx->top_pipe == NULL) then its + * hsplit group is equal to its own pipe ID + * Otherwise, all pipes part of the same blending tree have the same hsplit group + * ID as the topmost pipe + * + * If the pipe ctx is ODM combined, then similar logic follows + */ +static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + + if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state + == dc_pipe_ctx->plane_state) { + struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe; + int split_idx = 0; + + while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state + == dc_pipe_ctx->plane_state) { + first_pipe = first_pipe->top_pipe; + split_idx++; + } + + /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */ + if (split_idx == 0) + e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; + else if (split_idx == 1) + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + else if (split_idx == 2) + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx; + + } else if (dc_pipe_ctx->prev_odm_pipe) { + struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe; + + while (first_pipe->prev_odm_pipe) + first_pipe = first_pipe->prev_odm_pipe; + e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; + } +} + +static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale) +{ + const struct dc_plane_state *pln = dc_pipe_ctx->plane_state; + const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data; + + e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate; + e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln) + || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln) + || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled; + + /* stereo is not split */ + if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || + pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { + e2e_pipe->pipe.src.is_hsplit = false; + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + } + + e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 + || pln->rotation == ROTATION_ANGLE_270 ?
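As a worked example of the split_idx mapping in populate_hsplit_group_from_dc_pipe_ctx() above, take a hypothetical 4-deep MPC chain on one plane, P0 (top) -> P1 -> P2 -> P3: the upward walk yields split_idx 0, 1 and 2 for P1, P2 and P3, so P1 joins P0's group, P2 opens its own group, and P3 joins P2's, giving the two stacked 2-to-1 combines {P0, P1} and {P2, P3}. Restated as a sketch (helper and parameter names are invented):

	static int hsplit_group_for_depth(int split_idx, int chain_top_idx,
					  int self_idx, int immediate_top_idx)
	{
		switch (split_idx) {
		case 0: return chain_top_idx;     /* 2nd pipe of 1st combine */
		case 1: return self_idx;          /* 3rd pipe opens 2nd group */
		case 2: return immediate_top_idx; /* 4th pipe joins 3rd's group */
		default: return self_idx;
		}
	}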
dm_vert : dm_horz; + e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y; + e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y; + e2e_pipe->pipe.src.viewport_width = scl->viewport.width; + e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width; + e2e_pipe->pipe.src.viewport_height = scl->viewport.height; + e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height; + e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width; + e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height; + e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width; + e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height; + e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width; + e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height; + + if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA + || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; + e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; + e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; + e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; + } else { + e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; + e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; + } + e2e_pipe->pipe.src.dcc = pln->dcc.enable; + e2e_pipe->pipe.src.dcc_rate = 1; + e2e_pipe->pipe.dest.recout_width = scl->recout.width; + e2e_pipe->pipe.dest.recout_height = scl->recout.height; + e2e_pipe->pipe.dest.full_recout_height = scl->recout.height; + e2e_pipe->pipe.dest.full_recout_width = scl->recout.width; + if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1) + e2e_pipe->pipe.dest.full_recout_width *= 2; + else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1) + e2e_pipe->pipe.dest.full_recout_width *= 4; + else { + struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe; + + while (split_pipe && split_pipe->plane_state == pln) { + e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->bottom_pipe; + } + split_pipe = dc_pipe_ctx->top_pipe; + while (split_pipe && split_pipe->plane_state == pln) { + e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->top_pipe; + } + } + + e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16; + e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.scl_enable = + scl->ratios.vert.value != dc_fixpt_one.value + || scl->ratios.horz.value != dc_fixpt_one.value + || scl->ratios.vert_c.value != dc_fixpt_one.value + || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ + || always_scale; /*support always scale*/ + e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps; + e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c; + e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps; + e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; + + /* Currently compat_level is not defined. 
Commenting it until further resolution + * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) { + swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, + &e2e_pipe->pipe.src.sw_mode); + e2e_pipe->pipe.src.macro_tile_size = + swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); + } else { + gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode, + pln->compat_level, + &e2e_pipe->pipe.src.sw_mode); + e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile; + }*/ + + e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format); +} + +static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ + /* + * For graphic plane, cursor number is 1, nv12 is 0 + * bw calculations due to cursor on/off + */ + if (dc_pipe_ctx->plane_state && + (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || + dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)) + e2e_pipe->pipe.src.num_cursors = 0; + else + e2e_pipe->pipe.src.num_cursors = 1; + + e2e_pipe->pipe.src.cur0_src_width = 256; + e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit; +} + +static int populate_dml_pipes_from_context_base( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int pipe_cnt, i; + bool synchronized_vblank = true; + struct resource_context *res_ctx = &context->res_ctx; + + for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + if (pipe_cnt < 0) { + pipe_cnt = i; + continue; + } + + if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream) + continue; + + if (dc->debug.disable_timing_sync || + (!resource_are_streams_timing_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream) && + !resource_are_vblanks_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream))) { + synchronized_vblank = false; + break; + } + } + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; + + struct audio_check aud_check = {0}; + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /* todo: + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; + pipes[pipe_cnt].pipe.src.dcc = 0; + pipes[pipe_cnt].pipe.src.vm = 0;*/ + + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + + pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; + /* todo: rotation?*/ + pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; + if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; + /* 1/2 vblank */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = + (timing->v_total - timing->v_addressable + - timing->v_border_top - timing->v_border_bottom) / 2; + /* 36 bytes dp, 32 hdmi */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = + dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 
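The dynamic-metadata programming here budgets half of the vertical blank for transmitting the packet and sizes it per signal type (36 bytes for DP, 32 for HDMI, per the code). With generic CTA 1080p60 numbers as an assumed example, v_total 1125 and v_addressable 1080 with no borders give (1125 - 1080) / 2 = 22 lines before active:

	/* lines available before active video for metadata transmission */
	static int metadata_lines_before_active(int v_total, int v_active,
						int border_top, int border_bottom)
	{
		return (v_total - v_active - border_top - border_bottom) / 2;
	}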
36 : 32; + } + pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; + + dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest); + pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; + pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; + + pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; + + pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]); + + populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + + pipes[pipe_cnt].dout.dp_lanes = 4; + pipes[pipe_cnt].dout.is_virtual = 0; + pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal); + if (pipes[pipe_cnt].dout.output_type < 0) { + pipes[pipe_cnt].dout.output_type = dm_dp; + pipes[pipe_cnt].dout.is_virtual = 1; + } + + populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout); + + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; + + /* todo: default max for now, until there is logic reflecting this in dc*/ + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + /*fill up the audio sample rate (unit in kHz)*/ + get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check); + pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000; + + populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + + if (!res_ctx->pipe_ctx[i].plane_state) { + populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe); + } else { + populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale); + } + + pipe_cnt++; + } + + /* populate writeback information */ + if (dc->res_pool) + dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); + + return pipe_cnt; +} + +static int dml_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe; + + populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + pipes[pipe_cnt].pipe.src.gpuvm = true; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + pipe_cnt++; + } + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + } else if (!is_dual_plane(pipe->plane_state->format)) { 
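The division by 16.0 above reflects that dsc_cfg.bits_per_pixel carries the DSC target in units of 1/16 bit (4 fractional bits); a stored value of 192, for instance, encodes 12.0 bpp. A trivial standalone equivalent:

	static double dsc_bpp_from_fixed(int bits_per_pixel_sixteenths)
	{
		/* e.g. 192 / 16.0 = 12.0 bpp */
		return bits_per_pixel_sixteenths / 16.0;
	}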
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } + + return pipe_cnt; +} + +static void dml_full_validate_bw_helper(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *vlevel, + int *split, + bool *merge, + int *pipe_cnt) +{ + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + /* + * DML favors voltage over p-state, but we're more interested in + * supporting p-state over voltage. We can't support p-state in + * prefetch mode > 0 so try capping the prefetch mode to start. + */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh_and_mclk_switch; + *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); + /* This may adjust vlevel and maxMpcComb */ + if (*vlevel < context->bw_ctx.dml.soc.num_states) + *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); + + /* Conditions for setting up phantom pipes for SubVP: + * 1. Not force disable SubVP + * 2. Full update (i.e. !fast_validate) + * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) + * 4. Display configuration passes validation + * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) + */ + if (!dc->debug.force_disable_subvp && + dml_enough_pipes_for_subvp(dc, context) && + *vlevel < context->bw_ctx.dml.soc.num_states && + (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || + dc->debug.force_subvp_mclk_switch)) { + + dml_add_phantom_pipes(dc, context); + + /* Create input to DML based on new context which includes phantom pipes + * TODO: Input to DML should mark which pipes are phantom + */ + *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); + *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); + if (*vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, MAX_PIPES * sizeof(*split)); + memset(merge, 0, MAX_PIPES * sizeof(*merge)); + *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); + } + + // If SubVP pipe config is unsupported (or cannot be used for UCLK switching) + // remove phantom pipes and repopulate dml pipes + if (*vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { + dml_remove_phantom_pipes(dc, context); + *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); + } + } +} + +static void dcn20_adjust_adaptive_sync_v_startup( + const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ + struct dc_crtc_timing patched_crtc_timing; + uint32_t asic_blank_end = 0; + uint32_t asic_blank_start = 0; + uint32_t newVstartup = 0; + + patched_crtc_timing = *dc_crtc_timing; + + if (patched_crtc_timing.flags.INTERLACE == 1) { + if (patched_crtc_timing.v_front_porch < 2) + patched_crtc_timing.v_front_porch = 2; + } else { + if (patched_crtc_timing.v_front_porch < 1) + patched_crtc_timing.v_front_porch = 1; + } + + /* blank_start = frame end - front porch */ + asic_blank_start = patched_crtc_timing.v_total - + patched_crtc_timing.v_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = asic_blank_start - + patched_crtc_timing.v_border_bottom - + patched_crtc_timing.v_addressable - + patched_crtc_timing.v_border_top; + + newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + + *vstartup_start
= ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start); +} + +static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->stream->link->hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) +{ + int i; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; +#if defined (CONFIG_DRM_AMD_DC_DP2_0) + if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + return true; +#endif + } + return false; +} + +static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +{ + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; + } +} + +static bool dml_internal_validate( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + bool repopulate_pipes = false; + int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; + bool newly_split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + ASSERT(pipes); + if (!pipes) + return false; + + // For each full update, remove all existing phantom pipes first + dml_remove_phantom_pipes(dc, context); + + dml_update_soc_for_wm_a(dc, context); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) { + // On initial pass through DML, we intend to use MALL for SS on all + // (non-PSR) surfaces with none using MALL for P-State + // 'mall_plane_config': is not a member of 'dc_plane_state' - commenting it out till mall_plane_config gets supported in dc_plane_state + //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) + // pipe->plane_state->mall_plane_config.use_mall_for_ss = true; + } + } + pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (!fast_validate) { + dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); + } + + if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { + /* + * If mode is unsupported or there's still no p-state support then + * fall back to favoring voltage. + * + * We don't actually support prefetch mode 2, so require that we + * at least support prefetch mode 1.
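dcn20_adjust_adaptive_sync_v_startup() above only ever raises vstartup. With generic CTA 1080p60 timing as an assumed example (v_total 1125, v_addressable 1080, v_front_porch 4, no borders): blank_start = 1125 - 4 = 1121, blank_end = 1121 - 1080 = 41, and the candidate vstartup is 41 + (1125 - 1121) = 45 lines. A compact sketch of the same arithmetic:

	#include <stdint.h>

	static uint32_t adaptive_sync_vstartup(uint32_t v_total, uint32_t v_active,
					       uint32_t v_front_porch, uint32_t cur)
	{
		uint32_t blank_start = v_total - v_front_porch;
		uint32_t blank_end = blank_start - v_active; /* borders omitted */
		uint32_t cand = blank_end + (v_total - blank_start);

		return cand > cur ? cand : cur;
	}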
+ */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh; + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + if (vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + } + } + + dml_log_mode_support_params(&context->bw_ctx.dml); + + if (vlevel == context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + + if (!pipe->stream) + continue; + + /* We only support full screen mpo with ODM */ + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_res.scl_data.recout, + &pipe->plane_res.scl_data.recout, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + + /* merge pipes if necessary */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /*skip pipes that don't need merging*/ + if (!merge[i]) + continue; + + /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */ + if (pipe->prev_odm_pipe) { + /*split off odm pipe*/ + pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe; + if (pipe->next_odm_pipe) + pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe; + + pipe->bottom_pipe = NULL; + pipe->next_odm_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + pipe->top_pipe = NULL; + pipe->prev_odm_pipe = NULL; + if (pipe->stream_res.dsc) + dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc); + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + struct pipe_ctx *top_pipe = pipe->top_pipe; + struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; + + top_pipe->bottom_pipe = bottom_pipe; + if (bottom_pipe) + bottom_pipe->top_pipe = top_pipe; + + pipe->top_pipe = NULL; + pipe->bottom_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else + ASSERT(0); /* Should never try to merge master pipe */ + + } + + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *hsplit_pipe = NULL; + bool odm; + int old_index = -1; + + if (!pipe->stream || newly_split[i]) + continue; + + pipe_idx++; + odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled; + + if (!pipe->plane_state && !odm) + continue; + + if (split[i]) { + if (odm) { + if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + } else { + if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + 
old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else if (old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + } + hsplit_pipe = dml_find_split_pipe(dc, context, old_index); + ASSERT(hsplit_pipe); + if (!hsplit_pipe) + goto validate_fail; + + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe, odm)) + goto validate_fail; + + newly_split[hsplit_pipe->pipe_idx] = true; + repopulate_pipes = true; + } + if (split[i] == 4) { + struct pipe_ctx *pipe_4to1; + + if (odm && old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dml_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + + if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dml_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + hsplit_pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + } + if (odm) + dml_build_mapped_resource(dc, context, pipe->stream); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) { + if (!resource_build_scaling_params(pipe)) + goto validate_fail; + } + } + + /* Actual dsc count per stream dsc validation*/ + if (!dml_validate_dsc(dc, context)) { + vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; + goto validate_fail; + } + + if (repopulate_pipes) + pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + *vlevel_out = vlevel; + *pipe_cnt_out = pipe_cnt; + + out = true; + goto validate_out; + +validate_fail: + out = false; + +validate_out: + return out; +} + +static void dml_calculate_dlg_params( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx; + int plane_count; + + /* Writeback MCIF_WB arbitration parameters */ + if (dc->res_pool) + dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); + + context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; + context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; + context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; + context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 
1000; + context->bw_ctx.bw.dcn.clk.p_state_change_support = + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] + != dm_dram_clock_change_unsupported; + + context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; + /* 'z9_support': is not a member of 'dc_clocks' - Commenting out till we have this support in dc_clocks + * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ? + DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW; + */ + plane_count = 0; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].plane_state) + plane_count++; + } + + /* Commented out as per above error for now. + if (plane_count == 0) + context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW; + */ + context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); + /* TODO : Uncomment the below line and make changes + * as per DML nomenclature once it is available. + * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support; + */ + + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) + context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; + context->res_ctx.pipe_ctx[i].unbounded_req = false; + } else { + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes; + context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode; + } + + if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = + pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + pipe_idx++; + } + /* save an original dppclk copy */ + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; + context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes + - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, +
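Near the end of dml_calculate_dlg_params() above, the compressed buffer is sized as whatever remains of the configurable return buffer after per-pipe DET allocation. With assumed numbers that are not from this patch, a 1280KB return buffer and 4 active pipes at a 192KB DET each leave 1280 - 4 * 192 = 512KB of compbuf:

	static int compbuf_size_kb(int return_buffer_kb, int det_kb_per_pipe,
				   int active_pipes)
	{
		return return_buffer_kb - det_kb_per_pipe * active_pipes;
	}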
&context->res_ctx.pipe_ctx[i].dlg_regs, + &context->res_ctx.pipe_ctx[i].ttu_regs, + pipes, + pipe_cnt, + pipe_idx, + cstate_en, + context->bw_ctx.bw.dcn.clk.p_state_change_support, + false, false, true); + + context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, + &context->res_ctx.pipe_ctx[i].rq_regs, + &pipes[pipe_idx].pipe); + pipe_idx++; + } +} + +static void dml_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx, vlevel_temp = 0; + + double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz; + double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; + bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != + dm_dram_clock_change_unsupported; + + /* Set B: + * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present, + * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark + * calculations to cover bootup clocks. + * DCFCLK: soc.clock_limits[2] when available + * UCLK: soc.clock_limits[2] when available + */ + if (context->bw_ctx.dml.soc.num_states > 2) { + vlevel_temp = 2; + dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; + } else + dcfclk = 615; //DCFCLK Vmin_lv + + pipes[0].clks_cfg.voltage = vlevel_temp; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns 
wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8; + + /* Set D: + * All clocks min. + * DCFCLK: Min, as reported by PM FW when available + * UCLK : Min, as reported by PM FW when available + * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) + */ + + if (context->bw_ctx.dml.soc.num_states > 2) { + vlevel_temp = 0; + dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; + } else + dcfclk = 615; //DCFCLK Vmin_lv + + pipes[0].clks_cfg.voltage = vlevel_temp; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8; + + /* Set C, for Dummy P-State: + * All clocks min. 
+ * DCFCLK: Min, as reported by PM FW, when available + * UCLK: Min, as reported by PM FW, when available + * p-state latency as per the UCLK state's dummy p-state latency + */ + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { + unsigned int min_dram_speed_mts_margin = 160; + + if (!pstate_en) + min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16; + + /* find the largest table entry that is lower than the current dram speed; anything lower than DPM0 still uses DPM0 */ + for (i = 3; i > 0; i--) + if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) + break; + + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; + context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8; + + if (!pstate_en && dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { + /* The only difference between A and C is p-state latency; if p-state is not supported + * with the full p-state latency, we want to calculate DLG based on the dummy p-state latency. + * Set A's p-state watermark was previously set to 0 when p-state was unsupported; keep that + * behavior for now. + */ + context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; + } else { + /* Set A: + * All clocks min.
+ * DCFCLK: Min, as reported by PM FW, when available + * UCLK: Min, as reported by PM FW, when available + */ + dml_update_soc_for_wm_a(dc, context); + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + } + + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; + + dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + + if (!pstate_en) + /* Restore full p-state latency */ + context->bw_ctx.dml.soc.dram_clock_change_latency_us = + dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; +} + +bool dml_validate(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state; + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + + if (pipe_cnt == 0) + goto validate_out; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + 
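The WM_C dummy p-state lookup in dml_calculate_wm_and_dlg() above walks the four-entry dummy_pstate_table from the top down and stops at the largest entry below the current DRAM speed plus the 160 MT/s margin, falling through to index 0 (DPM0) when nothing qualifies. A standalone sketch with invented speeds:

	/* speeds[] ascending, e.g. {1600, 8000, 12000, 16000} MT/s: a current
	 * speed of 10000 MT/s stops at index 1 (8000); 1000 MT/s ends at 0. */
	static int dummy_pstate_index(const int speeds[4], int cur_mts, int margin_mts)
	{
		int i;

		for (i = 3; i > 0; i--)
			if (cur_mts + margin_mts > speeds[i])
				break;
		return i;
	}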
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + BW_VAL_TRACE_FINISH(); + + return out; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c new file mode 100644 index 000000000000..4ec5310a2962 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c @@ -0,0 +1,284 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifdef DML_WRAPPER_TRANSLATION_ + +static void gfx10array_mode_to_dml_params( + enum array_mode_values array_mode, + enum legacy_tiling_compat_level compat_level, + unsigned int *sw_mode) +{ + switch (array_mode) { + case DC_ARRAY_LINEAR_ALLIGNED: + case DC_ARRAY_LINEAR_GENERAL: + *sw_mode = dm_sw_linear; + break; + case DC_ARRAY_2D_TILED_THIN1: +// DC_LEGACY_TILING_ADDR_GEN_ZERO - undefined as per current code hence removed +#if 0 + if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO) + *sw_mode = dm_sw_gfx7_2d_thin_l_vp; + else + *sw_mode = dm_sw_gfx7_2d_thin_gl; +#endif + break; + default: + ASSERT(0); /* Not supported */ + break; + } +} + +static void swizzle_to_dml_params( + enum swizzle_mode_values swizzle, + unsigned int *sw_mode) +{ + switch (swizzle) { + case DC_SW_LINEAR: + *sw_mode = dm_sw_linear; + break; + case DC_SW_4KB_S: + *sw_mode = dm_sw_4kb_s; + break; + case DC_SW_4KB_S_X: + *sw_mode = dm_sw_4kb_s_x; + break; + case DC_SW_4KB_D: + *sw_mode = dm_sw_4kb_d; + break; + case DC_SW_4KB_D_X: + *sw_mode = dm_sw_4kb_d_x; + break; + case DC_SW_64KB_S: + *sw_mode = dm_sw_64kb_s; + break; + case DC_SW_64KB_S_X: + *sw_mode = dm_sw_64kb_s_x; + break; + case DC_SW_64KB_S_T: + *sw_mode = dm_sw_64kb_s_t; + break; + case DC_SW_64KB_D: + *sw_mode = dm_sw_64kb_d; + break; + case DC_SW_64KB_D_X: + *sw_mode = dm_sw_64kb_d_x; + break; + case DC_SW_64KB_D_T: + *sw_mode = dm_sw_64kb_d_t; + break; + case DC_SW_64KB_R_X: + *sw_mode = dm_sw_64kb_r_x; + break; + case DC_SW_VAR_S: + *sw_mode = dm_sw_var_s; + break; + case DC_SW_VAR_S_X: + *sw_mode = dm_sw_var_s_x; + break; + case DC_SW_VAR_D: + *sw_mode = dm_sw_var_d; + break; + case DC_SW_VAR_D_X: + *sw_mode = dm_sw_var_d_x; + break; + + default: + ASSERT(0); /* Not supported */ + break; + } +} + +static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct 
_vcs_dpi_display_pipe_dest_params_st *dest) +{ + dest->hblank_start = timing->h_total - timing->h_front_porch; + dest->hblank_end = dest->hblank_start + - timing->h_addressable + - timing->h_border_left + - timing->h_border_right; + dest->vblank_start = timing->v_total - timing->v_front_porch; + dest->vblank_end = dest->vblank_start + - timing->v_addressable + - timing->v_border_top + - timing->v_border_bottom; + dest->htotal = timing->h_total; + dest->vtotal = timing->v_total; + dest->hactive = timing->h_addressable; + dest->vactive = timing->v_addressable; + dest->interlaced = timing->flags.INTERLACE; + dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0; + if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + dest->pixel_rate_mhz *= 2; +} + +static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe) +{ + int odm_split_count = 0; + enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled; + struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + + // Traverse pipe tree to determine odm split count + while (next_pipe) { + odm_split_count++; + next_pipe = next_pipe->next_odm_pipe; + } + pipe = pipe->prev_odm_pipe; + while (pipe) { + odm_split_count++; + pipe = pipe->prev_odm_pipe; + } + + // Translate split to DML odm combine factor + switch (odm_split_count) { + case 1: + combine_mode = dm_odm_combine_mode_2to1; + break; + case 3: + combine_mode = dm_odm_combine_mode_4to1; + break; + default: + combine_mode = dm_odm_combine_mode_disabled; + } + + return combine_mode; +} + +static int get_dml_output_type(enum signal_type dc_signal) +{ + int dml_output_type = -1; + + switch (dc_signal) { + case SIGNAL_TYPE_DISPLAY_PORT_MST: + case SIGNAL_TYPE_DISPLAY_PORT: + dml_output_type = dm_dp; + break; + case SIGNAL_TYPE_EDP: + dml_output_type = dm_edp; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + dml_output_type = dm_hdmi; + break; + default: + break; + } + + return dml_output_type; +} + +static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout) +{ + int output_bpc = 0; + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + output_bpc = 6; + break; + case COLOR_DEPTH_888: + output_bpc = 8; + break; + case COLOR_DEPTH_101010: + output_bpc = 10; + break; + case COLOR_DEPTH_121212: + output_bpc = 12; + break; + case COLOR_DEPTH_141414: + output_bpc = 14; + break; + case COLOR_DEPTH_161616: + output_bpc = 16; + break; + case COLOR_DEPTH_999: + output_bpc = 9; + break; + case COLOR_DEPTH_111111: + output_bpc = 11; + break; + default: + output_bpc = 8; + break; + } + + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + dout->output_format = dm_444; + dout->output_bpp = output_bpc * 3; + break; + case PIXEL_ENCODING_YCBCR420: + dout->output_format = dm_420; + dout->output_bpp = (output_bpc * 3.0) / 2; + break; + case PIXEL_ENCODING_YCBCR422: + if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple) + dout->output_format = dm_n422; + else + dout->output_format = dm_s422; + dout->output_bpp = output_bpc * 2; + break; + default: + dout->output_format = dm_444; + dout->output_bpp = output_bpc * 3; + } +} + +static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format) +{ + enum source_format_class dml_format = dm_444_32; + + switch (dc_format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case 
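get_dml_odm_combine() above derives the combine factor purely from how many neighbours a pipe has in its ODM chain: one neighbour means a 2-to-1 combine, three means 4-to-1, and any other count is treated as disabled. Restated as a minimal sketch:

	static int odm_combine_factor(int odm_neighbours)
	{
		switch (odm_neighbours) {
		case 1: return 2; /* dm_odm_combine_mode_2to1 */
		case 3: return 4; /* dm_odm_combine_mode_4to1 */
		default: return 1; /* dm_odm_combine_mode_disabled */
		}
	}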
SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + dml_format = dm_420_8; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + dml_format = dm_420_10; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + dml_format = dm_444_64; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + dml_format = dm_444_16; + break; + case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: + dml_format = dm_444_8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + dml_format = dm_rgbe_alpha; + break; + default: + dml_format = dm_444_32; + break; + } + + return dml_format; +} + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h index e5fac9f4181d..e5fac9f4181d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c new file mode 100644 index 000000000000..122ba291a7ef --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -0,0 +1,260 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "rc_calc_fpu.h" + +#include "qp_tables.h" +#include "amdgpu_dm/dc_fpu.h" + +#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) + +#define MODE_SELECT(val444, val422, val420) \ + (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? 
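table_hash() packs its three lookup keys into disjoint bit ranges of a single int so one switch can dispatch across all eighteen QP tables: the decimal mode names (444/422/420) fit in nine bits above bit 16, bpc occupies the middle byte, and the max/min selector the low byte, so the fields cannot collide. A compile-and-run sketch of the packing:

#include <stdio.h>

/* Same packing idea as table_hash() above. */
#define TABLE_HASH(mode, bpc, sel) (((mode) << 16) | ((bpc) << 8) | (sel))

int main(void)
{
	printf("0x%07x\n", TABLE_HASH(444, 10, 1)); /* 0x1bc0a01 */
	printf("0x%07x\n", TABLE_HASH(420, 8, 0));  /* 0x1a40800 */
	return 0;
}
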
(val422) : (val420)) + + +#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ + table = qp_table_##mode##_##bpc##bpc_##max; \ + table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ + break + +static int median3(int a, int b, int c) +{ + if (a > b) + swap(a, b); + if (b > c) + swap(b, c); + if (a > b) + swap(b, c); + + return b; +} + +static double dsc_roundf(double num) +{ + if (num < 0.0) + num = num - 0.5; + else + num = num + 0.5; + + return (int)(num); +} + +static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, + enum max_min max_min, float bpp) +{ + int mode = MODE_SELECT(444, 422, 420); + int sel = table_hash(mode, bpc, max_min); + int table_size = 0; + int index; + const struct qp_entry *table = 0L; + + // alias enum + enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; + switch (sel) { + TABLE_CASE(444, 8, max); + TABLE_CASE(444, 8, min); + TABLE_CASE(444, 10, max); + TABLE_CASE(444, 10, min); + TABLE_CASE(444, 12, max); + TABLE_CASE(444, 12, min); + TABLE_CASE(422, 8, max); + TABLE_CASE(422, 8, min); + TABLE_CASE(422, 10, max); + TABLE_CASE(422, 10, min); + TABLE_CASE(422, 12, max); + TABLE_CASE(422, 12, min); + TABLE_CASE(420, 8, max); + TABLE_CASE(420, 8, min); + TABLE_CASE(420, 10, max); + TABLE_CASE(420, 10, min); + TABLE_CASE(420, 12, max); + TABLE_CASE(420, 12, min); + } + + if (table == 0) + return; + + index = (bpp - table[0].bpp) * 2; + + /* requested size is bigger than the table */ + if (index >= table_size) { + dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); + return; + } + + memcpy(qps, table[index].qps, sizeof(qp_set)); +} + +static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) +{ + int *p = ofs; + + if (mode == CM_444 || mode == CM_RGB) { + *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); + *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); + *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); + *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? 
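Every *p++ expression in get_ofs_set() has the same shape beneath the ternary nesting: clamp to a low value below one bpp threshold, clamp to a high value above another, and ramp linearly in between, rounded half away from zero exactly as dsc_roundf() does; the 4:4:4 branch chains two such ramps around a flat 8-12 bpp plateau. A sketch of the pattern, assuming nothing beyond the arithmetic visible above (ramp() is an illustrative helper, not a DC function):

#include <stdio.h>

/* Round half away from zero, as dsc_roundf() does. */
static int round_away(double v)
{
	return (int)(v < 0.0 ? v - 0.5 : v + 0.5);
}

/* Clamp below lo_bpp and above hi_bpp, interpolate linearly between. */
static int ramp(double bpp, double lo_bpp, double hi_bpp, int lo_val, int hi_val)
{
	if (bpp <= lo_bpp)
		return lo_val;
	if (bpp >= hi_bpp)
		return hi_val;
	return lo_val + round_away((bpp - lo_bpp) *
				   (hi_val - lo_val) / (hi_bpp - lo_bpp));
}

int main(void)
{
	/* CM_422 first entry: 2 at <= 8 bpp, 10 at >= 10 bpp, ramp between */
	printf("%d\n", ramp(9.0, 8.0, 10.0, 2, 10)); /* 6 */
	return 0;
}
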
(-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else if (mode == CM_422) { + *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); + *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); + *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } else { + *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); + *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); + *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); + *p++ = -10; + *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); + *p++ = -12; + *p++ = -12; + *p++ = -12; + } +} + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version) +{ + float bpp; + float bpp_group; + float initial_xmit_delay_factor; + int padding_pixels; + int i; + + dc_assert_fp_enabled(); + + bpp = ((float)drm_bpp / 16.0); + /* in native_422 or native_420 modes, the bits_per_pixel is double the + * target bpp (the latter is what calc_rc_params expects) + */ + if (is_navite_422_or_420) + bpp /= 2.0; + + rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + + bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); + + switch (cm) { + case CM_420: + rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? 
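The drm_bpp parameter reaching _do_calc_rc_params() is the DRM DSC fixed-point bits-per-pixel field, i.e. the target bpp times 16; in native 4:2:2/4:2:0 the PPS field carries double the effective rate, hence both the /16.0 and the halving at the top of the function. A two-line reduction:

#include <stdio.h>
#include <stdint.h>

/* Recover the float bpp the RC math works in (the DRM field is bpp * 16). */
static float rc_bpp(uint16_t drm_bpp, int native_422_or_420)
{
	float bpp = (float)drm_bpp / 16.0f;

	return native_422_or_420 ? bpp / 2.0f : bpp;
}

int main(void)
{
	printf("%.2f\n", rc_bpp(128, 0)); /* 8.00 bpp */
	printf("%.2f\n", rc_bpp(160, 1)); /* 10 bpp field -> 5.00 effective */
	return 0;
}
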
(6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); + break; + case CM_422: + rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + case CM_444: + case CM_RGB: + rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); + rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); + rc->second_line_bpg_offset = 0; + break; + } + + initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; + rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); + + if (cm == CM_422 || cm == CM_420) + slice_width /= 2; + + padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; + if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { + if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) + rc->initial_xmit_delay++; + } + + rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); + rc->flatness_det_thresh = 2 << (bpc - 8); + + get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); + get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); + if (cm == CM_444 && minor_version == 1) { + for (i = 0; i < QP_SET_SIZE; ++i) { + rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; + rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0; + } + } + get_ofs_set(rc->ofs, cm, bpp); + + /* fixed parameters */ + rc->rc_model_size = 8192; + rc->rc_edge_factor = 6; + rc->rc_tgt_offset_hi = 3; + rc->rc_tgt_offset_lo = 3; + + rc->rc_buf_thresh[0] = 896; + rc->rc_buf_thresh[1] = 1792; + rc->rc_buf_thresh[2] = 2688; + rc->rc_buf_thresh[3] = 3584; + rc->rc_buf_thresh[4] = 4480; + rc->rc_buf_thresh[5] = 5376; + rc->rc_buf_thresh[6] = 6272; + rc->rc_buf_thresh[7] = 6720; + rc->rc_buf_thresh[8] = 7168; + rc->rc_buf_thresh[9] = 7616; + rc->rc_buf_thresh[10] = 7744; + rc->rc_buf_thresh[11] = 7872; + rc->rc_buf_thresh[12] = 8000; + rc->rc_buf_thresh[13] = 8064; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h new file mode 100644 index 000000000000..cad244c023cd --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h @@ -0,0 +1,90 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
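The initial_xmit_delay expression above is easy to sanity-check by hand: rc_model_size is fixed at 8192 bits just below it, and the delay is half of that expressed in pixel times at the target bpp, halved again for native 4:2:2/4:2:0 through the factor. A worked example (build with -lm; round() rounds half away from zero like dsc_roundf()):

#include <stdio.h>
#include <math.h>

static int initial_xmit_delay(double bpp, int is_444_or_rgb)
{
	double factor = is_444_or_rgb ? 1.0 : 2.0;

	/* half the 8192-bit RC model, in pixel times at the target bpp */
	return (int)round(8192.0 / 2.0 / bpp / factor);
}

int main(void)
{
	printf("%d\n", initial_xmit_delay(8.0, 1));  /* 512 */
	printf("%d\n", initial_xmit_delay(12.0, 1)); /* 341 */
	return 0;
}
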
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __RC_CALC_FPU_H__ +#define __RC_CALC_FPU_H__ + +#include "os_types.h" +#include <drm/drm_dsc.h> + +#define QP_SET_SIZE 15 + +typedef int qp_set[QP_SET_SIZE]; + +struct rc_params { + int rc_quant_incr_limit0; + int rc_quant_incr_limit1; + int initial_fullness_offset; + int initial_xmit_delay; + int first_line_bpg_offset; + int second_line_bpg_offset; + int flatness_min_qp; + int flatness_max_qp; + int flatness_det_thresh; + qp_set qp_min; + qp_set qp_max; + qp_set ofs; + int rc_model_size; + int rc_edge_factor; + int rc_tgt_offset_hi; + int rc_tgt_offset_lo; + int rc_buf_thresh[QP_SET_SIZE - 1]; +}; + +enum colour_mode { + CM_RGB, /* 444 RGB */ + CM_444, /* 444 YUV or simple 422 */ + CM_422, /* native 422 */ + CM_420 /* native 420 */ +}; + +enum bits_per_comp { + BPC_8 = 8, + BPC_10 = 10, + BPC_12 = 12 +}; + +enum max_min { + DAL_MM_MIN = 0, + DAL_MM_MAX = 1 +}; + +struct qp_entry { + float bpp; + const qp_set qps; +}; + +typedef struct qp_entry qp_table[]; + +void _do_calc_rc_params(struct rc_params *rc, + enum colour_mode cm, + enum bits_per_comp bpc, + u16 drm_bpp, + bool is_navite_422_or_420, + int slice_width, + int slice_height, + int minor_version); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index 8d31eb75c6a6..a2537229ee88 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -1,35 +1,6 @@ # SPDX-License-Identifier: MIT # # Makefile for the 'dsc' sub-component of DAL. - -ifdef CONFIG_X86 -dsc_ccflags := -mhard-float -msse -endif - -ifdef CONFIG_PPC64 -dsc_ccflags := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). 
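The hard-float flags can be dropped from this Makefile because the float code now lives under dml/dsc, which builds with the DML FPU flags, and each entry point is bracketed by DC's FPU guards, as calc_rc_params() later in this patch shows. A user-space sketch of that calling discipline, with stand-in macros for DC_FP_START()/DC_FP_END() (in the kernel these wrap kernel_fpu_begin()/kernel_fpu_end() plus a recursion counter):

#include <stdio.h>

static int fpu_depth; /* stand-in for the per-CPU counter in dc_fpu.c */
#define DC_FP_START() (fpu_depth++)
#define DC_FP_END()   (fpu_depth--)

static void dc_assert_fp_enabled(void)
{
	if (fpu_depth <= 0)
		printf("float math outside an FPU section!\n");
}

/* dml/dsc side: hard-float code runs only inside a guarded section. */
static double fpu_only_math(double bpp)
{
	dc_assert_fp_enabled();
	return 8192.0 / 2.0 / bpp;
}

/* dsc/ side: the thin wrapper owns the critical section. */
static double wrapper(double bpp)
{
	double r;

	DC_FP_START();
	r = fpu_only_math(bpp);
	DC_FP_END();
	return r;
}

int main(void)
{
	printf("%.1f\n", wrapper(8.0)); /* 512.0 */
	return 0;
}
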
-dsc_ccflags += -mpreferred-stack-boundary=4 -else -dsc_ccflags += -msse2 -endif -endif - -CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags) - DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC)) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index f5b7da0e64c0..9c74564cbd8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -40,8 +40,15 @@ static bool dsc_policy_enable_dsc_when_not_needed; static bool dsc_policy_disable_dsc_stream_overhead; +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif + /* Forward Declerations */ -static void get_dsc_bandwidth_range( +static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, const uint32_t num_slices_h, @@ -76,11 +83,6 @@ static bool setup_dsc_config( int max_dsc_target_bpp_limit_override_x16, struct dc_dsc_config *dsc_cfg); -static struct fixed31_32 compute_dsc_max_bandwidth_overhead( - const struct dc_crtc_timing *timing, - const int num_slices_h, - const bool is_dp); - static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -361,7 +363,7 @@ bool dc_dsc_compute_bandwidth_range( dsc_min_slice_height_override, max_bpp_x16, &config); if (is_dsc_possible) - get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, + is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, config.num_slices_h, &dsc_common_caps, timing, range); return is_dsc_possible; @@ -453,6 +455,7 @@ static bool intersect_dsc_caps( if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420) dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8); + dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel; dsc_common_caps->is_dp = dsc_sink_caps->is_dp; return true; } @@ -462,32 +465,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value) return (value + 9) / 10; } -static struct fixed31_32 compute_dsc_max_bandwidth_overhead( - const struct dc_crtc_timing *timing, - const int num_slices_h, - const bool is_dp) -{ - struct fixed31_32 max_dsc_overhead; - struct fixed31_32 refresh_rate; - - if (dsc_policy_disable_dsc_stream_overhead || !is_dp) - return dc_fixpt_from_int(0); - - /* use target bpp that can take entire target bandwidth */ - refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz); - refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total); - refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total); - refresh_rate = dc_fixpt_mul_int(refresh_rate, 100); - - max_dsc_overhead = dc_fixpt_from_int(num_slices_h); - max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total); - max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256); - max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000); - max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate); - - return max_dsc_overhead; -} - static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bandwidth_in_kbps, const struct dc_crtc_timing *timing, @@ -495,14 +472,14 @@ static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bpp_increment_div, const bool is_dp) { - struct fixed31_32 overhead_in_kbps; + uint32_t overhead_in_kbps; struct fixed31_32 effective_bandwidth_in_kbps; struct fixed31_32 
bpp_x16; - overhead_in_kbps = compute_dsc_max_bandwidth_overhead( + overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps( timing, num_slices_h, is_dp); effective_bandwidth_in_kbps = dc_fixpt_from_int(bandwidth_in_kbps); - effective_bandwidth_in_kbps = dc_fixpt_sub(effective_bandwidth_in_kbps, + effective_bandwidth_in_kbps = dc_fixpt_sub_int(effective_bandwidth_in_kbps, overhead_in_kbps); bpp_x16 = dc_fixpt_mul_int(effective_bandwidth_in_kbps, 10); bpp_x16 = dc_fixpt_div_int(bpp_x16, timing->pix_clk_100hz); @@ -512,10 +489,12 @@ static uint32_t compute_bpp_x16_from_target_bandwidth( return dc_fixpt_floor(bpp_x16); } -/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock - * and uncompressed bandwidth. +/* Decide DSC bandwidth range based on signal, timing, specs specific and input min and max + * requirements. + * The range output includes decided min/max target bpp, the respective bandwidth requirements + * and native timing bandwidth requirement when DSC is not used. */ -static void get_dsc_bandwidth_range( +static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, const uint32_t num_slices_h, @@ -523,39 +502,52 @@ static void get_dsc_bandwidth_range( const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range) { - /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); - - /* max dsc target bpp */ - range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, - max_bpp_x16, num_slices_h, dsc_caps->is_dp); - range->max_target_bpp_x16 = max_bpp_x16; - if (range->max_kbps > range->stream_kbps) { - /* max dsc target bpp is capped to native bandwidth */ - range->max_kbps = range->stream_kbps; - range->max_target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( - range->max_kbps, timing, num_slices_h, - dsc_caps->bpp_increment_div, - dsc_caps->is_dp); + uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; + + memset(range, 0, sizeof(*range)); + + /* apply signal, timing, specs and explicitly specified DSC range requirements */ + if (preferred_bpp_x16) { + if (preferred_bpp_x16 <= max_bpp_x16 && + preferred_bpp_x16 >= min_bpp_x16) { + range->max_target_bpp_x16 = preferred_bpp_x16; + range->min_target_bpp_x16 = preferred_bpp_x16; + } + } + /* TODO - make this value generic to all signal types */ + else if (dsc_caps->edp_sink_max_bits_per_pixel) { + /* apply max bpp limitation from edp sink */ + range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel, + max_bpp_x16); + range->min_target_bpp_x16 = min_bpp_x16; + } + else { + range->max_target_bpp_x16 = max_bpp_x16; + range->min_target_bpp_x16 = min_bpp_x16; } - /* min dsc target bpp */ - range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, - min_bpp_x16, num_slices_h, dsc_caps->is_dp); - range->min_target_bpp_x16 = min_bpp_x16; - if (range->min_kbps > range->max_kbps) { - /* min dsc target bpp is capped to max dsc bandwidth*/ - range->min_kbps = range->max_kbps; - range->min_target_bpp_x16 = range->max_target_bpp_x16; + /* populate output structure */ + if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) { + /* native stream bandwidth */ + range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + + /* max dsc target bpp */ + range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, + range->max_target_bpp_x16, num_slices_h, dsc_caps->is_dp); + + /* min dsc target bpp */ + range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, + 
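The first half of decide_dsc_bandwidth_range() reduces to a clamp with two special cases: a fixed per-timing bpp wins outright when it lies inside the policy range, and an eDP sink bpp cap, when reported, lowers the maximum. A self-contained reduction in x16 units (decide_range() is an illustrative condensation, not the DC function):

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Returns 1 and a non-empty [lo, hi] range, or 0 if DSC cannot be used. */
static int decide_range(uint32_t fixed, uint32_t sink_max,
			uint32_t min, uint32_t max,
			uint32_t *lo, uint32_t *hi)
{
	*lo = *hi = 0;
	if (fixed) {
		if (fixed >= min && fixed <= max)
			*lo = *hi = fixed; /* fixed bpp wins when in range */
	} else {
		*hi = sink_max ? MIN(sink_max, max) : max;
		*lo = min;
	}
	return *hi >= *lo && *lo > 0;
}

int main(void)
{
	uint32_t lo, hi;

	/* policy 8..16 bpp (x16), eDP sink caps at 12 bpp -> 8..12 */
	printf("%d: %u..%u\n", decide_range(0, 192, 128, 256, &lo, &hi), lo, hi);
	return 0;
}
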
range->min_target_bpp_x16, num_slices_h, dsc_caps->is_dp); } + + return range->max_kbps >= range->min_kbps && range->min_kbps > 0; } /* Decides if DSC should be used and calculates target bpp if it should, applying DSC policy. * * Returns: - * - 'true' if DSC was required by policy and was successfully applied - * - 'false' if DSC was not necessary (e.g. if uncompressed stream fits 'target_bandwidth_kbps'), - * or if it couldn't be applied based on DSC policy. + * - 'true' if target bpp is decided + * - 'false' if target bpp cannot be decided (e.g. cannot fit even with min DSC bpp), */ static bool decide_dsc_target_bpp_x16( const struct dc_dsc_policy *policy, @@ -565,43 +557,32 @@ static bool decide_dsc_target_bpp_x16( const int num_slices_h, int *target_bpp_x16) { - bool should_use_dsc = false; struct dc_dsc_bw_range range; - memset(&range, 0, sizeof(range)); - - get_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, - num_slices_h, dsc_common_caps, timing, &range); - if (!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) { - /* enough bandwidth without dsc */ - *target_bpp_x16 = 0; - should_use_dsc = false; - } else if (policy->preferred_bpp_x16 > 0 && - policy->preferred_bpp_x16 <= range.max_target_bpp_x16 && - policy->preferred_bpp_x16 >= range.min_target_bpp_x16) { - *target_bpp_x16 = policy->preferred_bpp_x16; - should_use_dsc = true; - } else if (target_bandwidth_kbps >= range.max_kbps) { - /* use max target bpp allowed */ - *target_bpp_x16 = range.max_target_bpp_x16; - should_use_dsc = true; - } else if (target_bandwidth_kbps >= range.min_kbps) { - /* use target bpp that can take entire target bandwidth */ - *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( - target_bandwidth_kbps, timing, num_slices_h, - dsc_common_caps->bpp_increment_div, - dsc_common_caps->is_dp); - should_use_dsc = true; - } else { - /* not enough bandwidth to fulfill minimum requirement */ - *target_bpp_x16 = 0; - should_use_dsc = false; + *target_bpp_x16 = 0; + + if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, + num_slices_h, dsc_common_caps, timing, &range)) { + if (target_bandwidth_kbps >= range.stream_kbps) { + if (policy->enable_dsc_when_not_needed) + /* enable max bpp even dsc is not needed */ + *target_bpp_x16 = range.max_target_bpp_x16; + } else if (target_bandwidth_kbps >= range.max_kbps) { + /* use max target bpp allowed */ + *target_bpp_x16 = range.max_target_bpp_x16; + } else if (target_bandwidth_kbps >= range.min_kbps) { + /* use target bpp that can take entire target bandwidth */ + *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( + target_bandwidth_kbps, timing, num_slices_h, + dsc_common_caps->bpp_increment_div, + dsc_common_caps->is_dp); + } } - return should_use_dsc; + return *target_bpp_x16 != 0; } -#define MIN_AVAILABLE_SLICES_SIZE 4 +#define MIN_AVAILABLE_SLICES_SIZE 6 static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices) { @@ -887,6 +868,10 @@ static bool setup_dsc_config( min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? 
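With the range decided, the rewritten decide_dsc_target_bpp_x16() is a straight ladder; a runnable reduction with the bandwidth-to-bpp conversion stubbed to the range minimum (the real code calls compute_bpp_x16_from_target_bandwidth() at that step):

#include <stdio.h>
#include <stdint.h>

/* Returns the chosen bpp in x16 units, or 0 for "do not enable DSC". */
static uint32_t decide_target_bpp_x16(uint32_t target_kbps, uint32_t stream_kbps,
				      uint32_t min_kbps, uint32_t max_kbps,
				      uint32_t min_bpp_x16, uint32_t max_bpp_x16,
				      int enable_when_not_needed)
{
	if (target_kbps >= stream_kbps)
		/* uncompressed fits; use DSC only if policy forces it */
		return enable_when_not_needed ? max_bpp_x16 : 0;
	if (target_kbps >= max_kbps)
		return max_bpp_x16; /* room for best quality */
	if (target_kbps >= min_kbps)
		return min_bpp_x16; /* stub: real code computes the exact fit */
	return 0; /* does not fit even at minimum bpp */
}

int main(void)
{
	printf("%u\n", decide_target_bpp_x16(6000000, 7266240, 3633120,
					     5449680, 128, 192, 0)); /* 192 */
	return 0;
}
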
is_dsc_possible = (min_slices_h <= max_slices_h); + + if (min_slices_h == 0 && max_slices_h == 0) + is_dsc_possible = false; + if (!is_dsc_possible) goto done; @@ -994,19 +979,45 @@ bool dc_dsc_compute_config( uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp) { - struct fixed31_32 overhead_in_kbps; + uint32_t overhead_in_kbps; struct fixed31_32 bpp; struct fixed31_32 actual_bandwidth_in_kbps; - overhead_in_kbps = compute_dsc_max_bandwidth_overhead( + overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps( timing, num_slices_h, is_dp); bpp = dc_fixpt_from_fraction(bpp_x16, 16); actual_bandwidth_in_kbps = dc_fixpt_from_fraction(timing->pix_clk_100hz, 10); actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp); - actual_bandwidth_in_kbps = dc_fixpt_add(actual_bandwidth_in_kbps, overhead_in_kbps); + actual_bandwidth_in_kbps = dc_fixpt_add_int(actual_bandwidth_in_kbps, overhead_in_kbps); return dc_fixpt_ceil(actual_bandwidth_in_kbps); } +uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps( + const struct dc_crtc_timing *timing, + const int num_slices_h, + const bool is_dp) +{ + struct fixed31_32 max_dsc_overhead; + struct fixed31_32 refresh_rate; + + if (dsc_policy_disable_dsc_stream_overhead || !is_dp) + return 0; + + /* use target bpp that can take entire target bandwidth */ + refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz); + refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total); + refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total); + refresh_rate = dc_fixpt_mul_int(refresh_rate, 100); + + max_dsc_overhead = dc_fixpt_from_int(num_slices_h); + max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total); + max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256); + max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000); + max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate); + + return dc_fixpt_ceil(max_dsc_overhead); +} + void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy) @@ -1064,8 +1075,6 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, return; } - policy->preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16; - /* internal upper limit, default 16 bpp */ if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) policy->max_target_bpp = dsc_policy_max_target_bpp_limit; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index 7b294f637881..e97cf09be9d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -23,266 +23,7 @@ * Authors: AMD * */ -#include <drm/drm_dsc.h> - -#include "os_types.h" #include "rc_calc.h" -#include "qp_tables.h" - -#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min) - -#define MODE_SELECT(val444, val422, val420) \ - (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? 
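Both dc_dsc_stream_bandwidth_in_kbps() and the newly exported overhead helper are easier to check with the fixed31_32 steps replaced by doubles: pix_clk_100hz/10 is the pixel clock in kHz, so kHz times bpp gives kbps, and the overhead term charges 256 bits per horizontal slice per line per frame, scaled by the refresh rate (DP only). A worked example for a 4k60-class timing (build with -lm):

#include <stdio.h>
#include <stdint.h>
#include <math.h>

static uint32_t overhead_kbps(uint32_t pix_clk_100hz, uint32_t h_total,
			      uint32_t v_total, int num_slices_h, int is_dp)
{
	/* refresh rate in Hz from the timing */
	double refresh_hz = (double)pix_clk_100hz * 100.0 / h_total / v_total;

	if (!is_dp)
		return 0;
	/* 256 bits per slice per line, in kbits, per frame, times frames/s */
	return (uint32_t)ceil(num_slices_h * v_total * 256.0 / 1000.0 * refresh_hz);
}

static uint32_t stream_kbps(uint32_t pix_clk_100hz, double bpp, uint32_t overhead)
{
	/* pix_clk_100hz / 10 is the pixel clock in kHz; kHz * bpp = kbps */
	return (uint32_t)ceil(pix_clk_100hz / 10.0 * bpp) + overhead;
}

int main(void)
{
	uint32_t ovh = overhead_kbps(5940000, 4400, 2250, 4, 1); /* 138240 */

	/* ~594 MHz 4k60 timing at 12 bpp over 4 slices per row */
	printf("overhead %u kbps, total %u kbps\n",
	       ovh, stream_kbps(5940000, 12.0, ovh)); /* total 7266240 */
	return 0;
}
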
(val422) : (val420)) - - -#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \ - table = qp_table_##mode##_##bpc##bpc_##max; \ - table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \ - break - - -static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, - enum max_min max_min, float bpp) -{ - int mode = MODE_SELECT(444, 422, 420); - int sel = table_hash(mode, bpc, max_min); - int table_size = 0; - int index; - const struct qp_entry *table = 0L; - - // alias enum - enum { min = DAL_MM_MIN, max = DAL_MM_MAX }; - switch (sel) { - TABLE_CASE(444, 8, max); - TABLE_CASE(444, 8, min); - TABLE_CASE(444, 10, max); - TABLE_CASE(444, 10, min); - TABLE_CASE(444, 12, max); - TABLE_CASE(444, 12, min); - TABLE_CASE(422, 8, max); - TABLE_CASE(422, 8, min); - TABLE_CASE(422, 10, max); - TABLE_CASE(422, 10, min); - TABLE_CASE(422, 12, max); - TABLE_CASE(422, 12, min); - TABLE_CASE(420, 8, max); - TABLE_CASE(420, 8, min); - TABLE_CASE(420, 10, max); - TABLE_CASE(420, 10, min); - TABLE_CASE(420, 12, max); - TABLE_CASE(420, 12, min); - } - - if (table == 0) - return; - - index = (bpp - table[0].bpp) * 2; - - /* requested size is bigger than the table */ - if (index >= table_size) { - dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n"); - return; - } - - memcpy(qps, table[index].qps, sizeof(qp_set)); -} - -static double dsc_roundf(double num) -{ - if (num < 0.0) - num = num - 0.5; - else - num = num + 0.5; - - return (int)(num); -} - -static double dsc_ceil(double num) -{ - double retval = (int)num; - - if (retval != num && num > 0) - retval = num + 1; - - return (int)retval; -} - -static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp) -{ - int *p = ofs; - - if (mode == CM_444 || mode == CM_RGB) { - *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0)))))); - *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0)))))); - *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0)))); - *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? 
(-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else if (mode == CM_422) { - *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0)))); - *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0)))); - *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } else { - *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0)))); - *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0)))); - *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0)))); - *p++ = -10; - *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0)))); - *p++ = -12; - *p++ = -12; - *p++ = -12; - } -} - -static int median3(int a, int b, int c) -{ - if (a > b) - swap(a, b); - if (b > c) - swap(b, c); - if (a > b) - swap(b, c); - - return b; -} - -static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, - enum bits_per_comp bpc, u16 drm_bpp, - bool is_navite_422_or_420, - int slice_width, int slice_height, - int minor_version) -{ - float bpp; - float bpp_group; - float initial_xmit_delay_factor; - int padding_pixels; - int i; - - bpp = ((float)drm_bpp / 16.0); - /* in native_422 or native_420 modes, the bits_per_pixel is double the - * target bpp (the latter is what calc_rc_params expects) - */ - if (is_navite_422_or_420) - bpp /= 2.0; - - rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - - bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0); - - switch (cm) { - case CM_420: - rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? 
(6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group))); - break; - case CM_422: - rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584)))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - case CM_444: - case CM_RGB: - rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2))))); - rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group))); - rc->second_line_bpg_offset = 0; - break; - } - - initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0; - rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor); - - if (cm == CM_422 || cm == CM_420) - slice_width /= 2; - - padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0; - if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) { - if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1) - rc->initial_xmit_delay++; - } - - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); - rc->flatness_det_thresh = 2 << (bpc - 8); - - get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp); - get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp); - if (cm == CM_444 && minor_version == 1) { - for (i = 0; i < QP_SET_SIZE; ++i) { - rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0; - rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0; - } - } - get_ofs_set(rc->ofs, cm, bpp); - - /* fixed parameters */ - rc->rc_model_size = 8192; - rc->rc_edge_factor = 6; - rc->rc_tgt_offset_hi = 3; - rc->rc_tgt_offset_lo = 3; - - rc->rc_buf_thresh[0] = 896; - rc->rc_buf_thresh[1] = 1792; - rc->rc_buf_thresh[2] = 2688; - rc->rc_buf_thresh[3] = 3584; - rc->rc_buf_thresh[4] = 4480; - rc->rc_buf_thresh[5] = 5376; - rc->rc_buf_thresh[6] = 6272; - rc->rc_buf_thresh[7] = 6720; - rc->rc_buf_thresh[8] = 7168; - rc->rc_buf_thresh[9] = 7616; - rc->rc_buf_thresh[10] = 7744; - rc->rc_buf_thresh[11] = 7872; - rc->rc_buf_thresh[12] = 8000; - rc->rc_buf_thresh[13] = 8064; -} - -static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp, - bool is_navite_422_or_420) -{ - float bpp; - u32 bytes_per_pixel; - double d_bytes_per_pixel; - - bpp = ((float)drm_bpp / 16.0); - d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; - // TODO: Make sure the formula for calculating this is precise (ceiling - // vs. 
floor, and at what point they should be applied) - if (is_navite_422_or_420) - d_bytes_per_pixel /= 2; - - bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); - - return bytes_per_pixel; -} /** * calc_rc_params - reads the user's cmdline mode @@ -319,31 +60,3 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps) pps->dsc_version_minor); DC_FP_END(); } - -/** - * calc_dsc_bytes_per_pixel - calculate bytes per pixel - * @pps: DRM struct with all required DSC values - * - * Based on the information inside drm_dsc_config, this function calculates the - * total of bytes per pixel. - * - * @note This calculation requires float point operation, most of it executes - * under kernel_fpu_{begin,end}. - * - * Return: - * Return the number of bytes per pixel - */ -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps) - -{ - u32 ret; - u16 drm_bpp = pps->bits_per_pixel; - int slice_width = pps->slice_width; - bool is_navite_422_or_420 = pps->native_422 || pps->native_420; - - DC_FP_START(); - ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp, - is_navite_422_or_420); - DC_FP_END(); - return ret; -} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index 262f06afcbf9..80921c1c0d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -27,58 +27,9 @@ #ifndef __RC_CALC_H__ #define __RC_CALC_H__ - -#define QP_SET_SIZE 15 - -typedef int qp_set[QP_SET_SIZE]; - -struct rc_params { - int rc_quant_incr_limit0; - int rc_quant_incr_limit1; - int initial_fullness_offset; - int initial_xmit_delay; - int first_line_bpg_offset; - int second_line_bpg_offset; - int flatness_min_qp; - int flatness_max_qp; - int flatness_det_thresh; - qp_set qp_min; - qp_set qp_max; - qp_set ofs; - int rc_model_size; - int rc_edge_factor; - int rc_tgt_offset_hi; - int rc_tgt_offset_lo; - int rc_buf_thresh[QP_SET_SIZE - 1]; -}; - -enum colour_mode { - CM_RGB, /* 444 RGB */ - CM_444, /* 444 YUV or simple 422 */ - CM_422, /* native 422 */ - CM_420 /* native 420 */ -}; - -enum bits_per_comp { - BPC_8 = 8, - BPC_10 = 10, - BPC_12 = 12 -}; - -enum max_min { - DAL_MM_MIN = 0, - DAL_MM_MAX = 1 -}; - -struct qp_entry { - float bpp; - const qp_set qps; -}; - -typedef struct qp_entry qp_table[]; +#include "dml/dsc/rc_calc_fpu.h" void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index ef830aded5b1..7e306aa3e2b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -22,7 +22,6 @@ * Authors: AMD * */ -#include "os_types.h" #include <drm/drm_dsc.h> #include "dscc_types.h" #include "rc_calc.h" @@ -101,8 +100,7 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par int ret; struct rc_params rc; struct drm_dsc_config dsc_cfg; - - dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps); + unsigned long long tmp; calc_rc_params(&rc, pps); dsc_params->pps = *pps; @@ -114,6 +112,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 
48 : 64; ret = drm_dsc_compute_rc_parameters(&dsc_cfg); + tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1); + do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP + dsc_params->bytes_per_pixel = (uint32_t)tmp; copy_pps_fields(&dsc_params->pps, &dsc_cfg); dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index c5c840a06050..5029d4e42dbf 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -105,6 +105,7 @@ bool dal_hw_factory_init( case DCN_VERSION_2_0: dal_hw_factory_dcn20_init(factory); return true; + case DCN_VERSION_2_01: case DCN_VERSION_2_1: dal_hw_factory_dcn21_init(factory); return true; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 4a9848308766..904bd30bed68 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -100,6 +100,7 @@ bool dal_hw_translate_init( case DCN_VERSION_2_0: dal_hw_translate_dcn20_init(translate); return true; + case DCN_VERSION_2_01: case DCN_VERSION_2_1: dal_hw_translate_dcn21_init(translate); return true; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 45a6216dfa2a..6fc6488c54c0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -62,6 +62,7 @@ struct link_init_data { uint32_t connector_index; /* this will be mapped to the HPD pins */ uint32_t link_index; /* this is mapped to DAL display_index TODO: remove it when DC is complete. */ + bool is_dpia_link; }; struct dc_link *link_create(const struct link_init_data *init_params); @@ -245,8 +246,16 @@ struct resource_pool { * entries in link_encoders array. */ unsigned int dig_link_enc_count; + /* Number of USB4 DPIA (DisplayPort Input Adapter) link objects created.*/ + unsigned int usb4_dpia_count; #if defined(CONFIG_DRM_AMD_DC_DCN) + unsigned int hpo_dp_stream_enc_count; + struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS]; + unsigned int hpo_dp_link_enc_count; + struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; +#endif +#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; #endif @@ -298,6 +307,9 @@ struct stream_resource { struct display_stream_compressor *dsc; struct timing_generator *tg; struct stream_encoder *stream_enc; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_stream_encoder *hpo_dp_stream_enc; +#endif struct audio *audio; struct pixel_clk_params pix_clk_params; @@ -366,6 +378,9 @@ struct pipe_ctx { struct _vcs_dpi_display_ttu_regs_st ttu_regs; struct _vcs_dpi_display_rq_regs_st rq_regs; struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param; + struct _vcs_dpi_display_rq_params_st dml_rq_param; + struct _vcs_dpi_display_dlg_sys_params_st dml_dlg_sys_param; + struct _vcs_dpi_display_e2e_pipe_params_st dml_input; int det_buffer_size_kb; bool unbounded_req; #endif @@ -375,6 +390,17 @@ struct pipe_ctx { bool vtp_locked; }; +/* Data used for dynamic link encoder assignment. + * Tracks current and future assignments; available link encoders; + * and mode of operation (whether to use current or future assignments). 
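The float-free replacement above derives bytes_per_pixel straight from the PPS chunk size, in 2^28 fixed point, and gets ceiling semantics out of do_div() with the usual add-(divisor-1) idiom. An equivalent user-space sketch:

#include <stdio.h>
#include <stdint.h>

/* ceil(slice_chunk_size * 2^28 / slice_width), as the do_div() above. */
static uint32_t bytes_per_pixel_fixed(uint32_t slice_chunk_size, uint32_t slice_width)
{
	uint64_t tmp = (uint64_t)slice_chunk_size * 0x10000000ull
		       + (slice_width - 1);

	return (uint32_t)(tmp / slice_width);
}

int main(void)
{
	/* 540-byte chunks over a 1920-pixel slice: 0.28125 bytes/px */
	printf("0x%08x\n", bytes_per_pixel_fixed(540, 1920)); /* 0x04800000 */
	return 0;
}
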
+ */ +struct link_enc_cfg_context { + enum link_enc_cfg_mode mode; + struct link_enc_assignment link_enc_assignments[MAX_PIPES]; + enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS]; + struct link_enc_assignment transient_assignments[MAX_PIPES]; +}; + struct resource_context { struct pipe_ctx pipe_ctx[MAX_PIPES]; bool is_stream_enc_acquired[MAX_PIPES * 2]; @@ -382,12 +408,10 @@ struct resource_context { uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES]; uint8_t dp_clock_source_ref_count; bool is_dsc_acquired[MAX_PIPES]; - /* A table/array of encoder-to-link assignments. One entry per stream. - * Indexed by stream index in dc_state. - */ - struct link_enc_assignment link_enc_assignments[MAX_PIPES]; - /* List of available link encoders. Uses engine ID as encoder identifier. */ - enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS]; + struct link_enc_cfg_context link_enc_cfg_ctx; +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; +#endif #if defined(CONFIG_DRM_AMD_DC_DCN) bool is_mpc_3dlut_acquired[MAX_PIPES]; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 4d7b271b6409..95fb61d62778 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -69,6 +69,7 @@ struct ddc_service_init_data { struct graphics_object_id id; struct dc_context *ctx; struct dc_link *link; + bool is_dpia_link; }; struct ddc_service *dal_ddc_service_create( diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 01c3a31be191..a6d3d859754a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -30,6 +30,7 @@ #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ #define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ #define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ +#define MAX_MTP_SLOT_COUNT 64 #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50 #define TRAINING_AUX_RD_INTERVAL 100 //us @@ -120,12 +121,12 @@ enum dc_status dpcd_set_lane_settings( const struct link_training_settings *link_training_setting, uint32_t offset); /* Read training status and adjustment requests from DPCD. 
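Read together with the link_enc_cfg_mode enum at the end of this series, the intent of link_enc_cfg_context appears to be that lookups are served from one of two tables depending on whether a state commit is in flight; a compressed sketch of that selection (the types and accessor are illustrative, trimmed from the MAX_PIPES-sized originals):

#include <stdio.h>

enum cfg_mode { CFG_STEADY, CFG_TRANSIENT };

struct assignment { int valid; int eng_id; };

struct cfg_ctx {
	enum cfg_mode mode;
	struct assignment link_enc_assignments[6];  /* committed state */
	struct assignment transient_assignments[6]; /* state being committed */
};

/* Serve lookups from whichever table matches the current mode. */
static const struct assignment *active_table(const struct cfg_ctx *ctx)
{
	return ctx->mode == CFG_TRANSIENT ? ctx->transient_assignments
					  : ctx->link_enc_assignments;
}

int main(void)
{
	struct cfg_ctx ctx = { .mode = CFG_TRANSIENT };

	ctx.transient_assignments[0].eng_id = 2;
	printf("eng %d\n", active_table(&ctx)[0].eng_id);
	return 0;
}
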
*/ -enum dc_status dp_get_lane_status_and_drive_settings( +enum dc_status dp_get_lane_status_and_lane_adjust( struct dc_link *link, const struct link_training_settings *link_training_setting, - union lane_status *ln_status, - union lane_align_status_updated *ln_status_updated, - struct link_training_settings *req_settings, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], uint32_t offset); void dp_wait_for_training_aux_rd_interval( @@ -146,10 +147,15 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status); bool dp_is_max_vs_reached( const struct link_training_settings *lt_settings); - -void dp_update_drive_settings( - struct link_training_settings *dest, - struct link_training_settings src); +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval); @@ -165,7 +171,7 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); -bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable); +bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable); @@ -189,5 +195,26 @@ enum dc_status dpcd_configure_lttpr_mode( struct link_training_settings *lt_settings); enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings); +bool dpcd_write_128b_132b_sst_payload_allocation_table( + const struct dc_stream_state *stream, + struct dc_link *link, + struct link_mst_stream_allocation_table *proposed_table, + bool allocate); + +enum dc_status dpcd_configure_channel_coding( + struct dc_link *link, + struct link_training_settings *lt_settings); + +bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link); + +struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link); +void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings); +void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal); +void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); +bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); +void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link); + bool dp_retrieve_lttpr_cap(struct dc_link *link); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h new file mode 100644 index 000000000000..974d703e3771 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
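The removal of dp_update_drive_settings() here splits one combined step into three: read the sink's adjust requests, decide the hw lane settings, and convert those into DPCD form. The conversion targets the DP TRAINING_LANEx_SET byte layout: swing level in bits 0-1 with bit 2 as max-swing-reached, pre-emphasis in bits 3-4 with bit 5 as max-pre-emphasis-reached. A one-lane sketch of what dp_hw_to_dpcd_lane_settings() must produce (the helper name below is illustrative; level 3 is the spec maximum):

#include <stdio.h>
#include <stdint.h>

static uint8_t hw_to_dpcd_lane(uint8_t swing, uint8_t pre_emph)
{
	return (swing & 0x3) | ((swing >= 3) << 2) |
	       ((pre_emph & 0x3) << 3) | ((pre_emph >= 3) << 5);
}

int main(void)
{
	printf("0x%02x\n", hw_to_dpcd_lane(3, 0)); /* 0x07: swing 3, max reached */
	return 0;
}
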
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DPIA_H__ +#define __DC_LINK_DPIA_H__ + +/* This module implements functionality for training DPIA links. */ + +struct dc_link; +struct dc_link_settings; + +/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ +#define DPIA_CLK_SYNC_DELAY 16000 + +/* Extend interval between training status checks for manual testing. */ +#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 + +/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ +/* DPCD DP Tunneling over USB4 */ +#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d +#define DP_IN_ADAPTER_INFO 0xe000e +#define DP_USB4_DRIVER_ID 0xe000f +#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b + +/* SET_CONFIG message types sent by driver. */ +enum dpia_set_config_type { + DPIA_SET_CFG_SET_LINK = 0x01, + DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, + DPIA_SET_CFG_SET_TRAINING = 0x18, + DPIA_SET_CFG_SET_VSPE = 0x19 +}; + +/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ +enum dpia_set_config_ts { + DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ + DPIA_TS_TPS1 = 0x01, + DPIA_TS_TPS2 = 0x02, + DPIA_TS_TPS3 = 0x03, + DPIA_TS_TPS4 = 0x07, + DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ +}; + +/* SET_CONFIG message data associated with messages sent by driver. */ +union dpia_set_config_data { + struct { + uint8_t mode : 1; + uint8_t reserved : 7; + } set_link; + struct { + uint8_t stage; + } set_training; + struct { + uint8_t swing : 2; + uint8_t max_swing_reached : 1; + uint8_t pre_emph : 2; + uint8_t max_pre_emph_reached : 1; + uint8_t reserved : 2; + } set_vspe; + uint8_t raw; +}; + +/* Read tunneling device capability from DPCD and update link capability + * accordingly. + */ +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); + +/* Train DP tunneling link for USB4 DPIA display endpoint. + * DPIA equivalent of dc_link_dp_perfrorm_link_training. + * Aborts link training upon detection of sink unplug. 
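Note the non-contiguous stage encoding in the SET_TRAINING enum above: TPS1-TPS3 are 1-3 but TPS4 is 0x07, matching the TPS4 value in the DPCD TRAINING_PATTERN_SET register, with 0x00 and 0xff reserved for the two done markers. A sketch of the mapping a DPIA trainer needs (the dp_pattern enum is an illustrative stand-in for the DC type):

#include <stdio.h>

enum dp_pattern { PAT_TPS1 = 1, PAT_TPS2, PAT_TPS3, PAT_TPS4 };

static unsigned char training_stage(enum dp_pattern p)
{
	switch (p) {
	case PAT_TPS1: return 0x01; /* DPIA_TS_TPS1 */
	case PAT_TPS2: return 0x02; /* DPIA_TS_TPS2 */
	case PAT_TPS3: return 0x03; /* DPIA_TS_TPS3 */
	case PAT_TPS4: return 0x07; /* DPIA_TS_TPS4 */
	}
	return 0xff; /* DPIA_TS_UFP_DONE */
}

int main(void)
{
	printf("0x%02x\n", training_stage(PAT_TPS4));
	return 0;
}
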
+ */ +enum link_training_result +dc_link_dpia_perform_link_training(struct dc_link *link, + const struct dc_link_settings *link_setting, + bool skip_video_pattern); + +#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 806f3041db14..337c0161e72d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -619,7 +619,7 @@ struct dcn_ip_params { }; extern const struct dcn_ip_params dcn10_ip_defaults; -bool dcn_validate_bandwidth( +bool dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h new file mode 100644 index 000000000000..5dcfbd8e2697 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h @@ -0,0 +1,34 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef DML_WRAPPER_H_ +#define DML_WRAPPER_H_ + +#include "dc.h" +#include "dml/display_mode_vba.h" + +bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index 142753644377..ecb4191b6e64 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -54,6 +54,7 @@ struct abm_funcs { const char *src, unsigned int bytes, unsigned int inst); + bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index a262f3278c21..1391c20f1852 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -102,6 +102,11 @@ enum dentist_divider_range { .MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \ .MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67 +#define CLK_COMMON_REG_LIST_DCN_201() \ + SR(DENTIST_DISPCLK_CNTL), \ + CLK_SRI(CLK4_CLK_PLL_REQ, CLK4, 0), \ + CLK_SRI(CLK4_CLK2_CURRENT_CNT, CLK4, 0) + #define CLK_REG_LIST_NV10() \ SR(DENTIST_DISPCLK_CNTL), \ CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \ @@ -144,6 +149,12 @@ enum dentist_divider_range { CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\ CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh) +#define CLK_COMMON_MASK_SH_LIST_DCN201_BASE(mask_sh) \ + CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\ + CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\ + CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh),\ + CLK_SF(CLK4_0_CLK4_CLK_PLL_REQ, FbMult_int, mask_sh) + #define CLK_REG_FIELD_LIST(type) \ type DPREFCLK_SRC_SEL; \ type DENTIST_DPREFCLK_WDIVIDER; \ @@ -179,6 +190,8 @@ struct clk_mgr_mask { struct clk_mgr_registers { uint32_t DPREFCLK_CNTL; uint32_t DENTIST_DISPCLK_CNTL; + uint32_t CLK4_CLK2_CURRENT_CNT; + uint32_t CLK4_CLK_PLL_REQ; uint32_t CLK3_CLK2_DFS_CNTL; uint32_t CLK3_CLK_PLL_REQ; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 0afa2364a986..c940fdfda144 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -79,7 +79,30 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); +#if defined(CONFIG_DRM_AMD_DC_DCN) + void (*set_dpstreamclk)( + struct dccg *dccg, + enum hdmistreamclk_source src, + int otg_inst); + + void (*enable_symclk32_se)( + struct dccg *dccg, + int hpo_se_inst, + enum phyd32clk_clock_source phyd32clk); + void (*disable_symclk32_se)( + struct dccg *dccg, + int hpo_se_inst); + + void (*enable_symclk32_le)( + struct dccg *dccg, + int hpo_le_inst, + enum phyd32clk_clock_source phyd32clk); + + void (*disable_symclk32_le)( + struct dccg *dccg, + int hpo_le_inst); +#endif void (*set_physymclk)( struct dccg *dccg, int phy_inst, @@ -100,6 +123,15 @@ struct dccg_funcs { void (*set_dispclk_change_mode)( struct dccg *dccg, enum dentist_dispclk_change_mode change_mode); + + void (*disable_dsc)( + struct dccg *dccg, + int inst); + + void (*enable_dsc)( + struct dccg *dccg, + int inst); + }; #endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 00fc81431b43..3ef7faa92052 100644 --- 
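/* A hedged sketch of how the new optional set_abm_pause hook above might be
 * driven. Hooks in these funcs tables are per-ASIC and may be NULL, so
 * callers are expected to check before dispatching; the wrapper name is
 * illustrative, and struct abm is assumed to carry its funcs pointer as it
 * does elsewhere in dc.
 */
static bool abm_pause_if_supported(struct abm *abm, bool pause,
				   unsigned int panel_inst, unsigned int otg_inst)
{
	if (abm && abm->funcs && abm->funcs->set_abm_pause)
		return abm->funcs->set_abm_pause(abm, pause, panel_inst, otg_inst);

	return false;	/* Hook not implemented on this ASIC. */
}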
a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -29,6 +29,17 @@ #include "transform.h" +union defer_reg_writes { + struct { + bool disable_blnd_lut:1; + bool disable_3dlut:1; + bool disable_shaper:1; + bool disable_gamcor:1; + bool disable_dscl:1; + } bits; + uint32_t raw; +}; + struct dpp { const struct dpp_funcs *funcs; struct dc_context *ctx; @@ -43,6 +54,7 @@ struct dpp { struct pwl_params regamma_params; struct pwl_params degamma_params; struct dpp_cursor_attributes cur_attr; + union defer_reg_writes deferred_reg_writes; struct pwl_params shaper_params; bool cm_bypass_mode; @@ -245,6 +257,8 @@ struct dpp_funcs { bool dppclk_div, bool enable); + void (*dpp_deferred_update)( + struct dpp *dpp); bool (*dpp_program_blnd_lut)( struct dpp *dpp, const struct pwl_params *params); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index f94135c6e3c2..346f0ba73e86 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -61,6 +61,8 @@ struct dcn_dsc_state { uint32_t dsc_pic_height; uint32_t dsc_slice_bpg_offset; uint32_t dsc_chunk_size; + uint32_t dsc_fw_en; + uint32_t dsc_opp_source; }; @@ -88,6 +90,7 @@ struct dsc_enc_caps { int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */ int32_t max_slice_width; uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */ + uint32_t edp_sink_max_bits_per_pixel; bool is_dp; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index ec28cb9c3a8e..fd6572ba3fb2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -171,10 +171,9 @@ struct dwbc { bool dwb_is_efc_transition; bool dwb_is_drc; int wb_src_plane_inst;/*hubp, mpcc, inst*/ - bool update_privacymask; uint32_t mask_id; - int otg_inst; - bool mvc_cfg; + int otg_inst; + bool mvc_cfg; }; struct dwbc_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 31a1713bb49f..10ecbc667ffa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -38,6 +38,10 @@ #define MAX_PIPES 6 #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define MAX_HPO_DP2_ENCODERS 4 +#define MAX_HPO_DP2_LINK_ENCODERS 2 +#endif struct gamma_curve { uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 9eaf345aa2a1..bb0e91756ddd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -59,6 +59,10 @@ struct encoder_feature_support { uint32_t IS_TPS3_CAPABLE:1; uint32_t IS_TPS4_CAPABLE:1; uint32_t HDMI_6GB_EN:1; + uint32_t IS_DP2_CAPABLE:1; + uint32_t IS_UHBR10_CAPABLE:1; + uint32_t IS_UHBR13_5_CAPABLE:1; + uint32_t IS_UHBR20_CAPABLE:1; uint32_t DP_IS_USB_C:1; } bits; uint32_t raw; @@ -208,6 +212,99 @@ struct link_enc_assignment { bool valid; struct display_endpoint_id ep_id; enum engine_id eng_id; + struct dc_stream_state *stream; }; +enum link_enc_cfg_mode { + LINK_ENC_CFG_STEADY, /* Normal operation - use current_state. */ + LINK_ENC_CFG_TRANSIENT /* During commit state - use state to be committed. 
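/* A rough sketch of consuming the new encoder capability bits above when
 * deciding whether UHBR link rates may be advertised. It assumes the
 * enclosing flags union and the encoder's features member as used elsewhere
 * in dc; the helper itself is hypothetical.
 */
static bool link_enc_supports_uhbr10(const struct link_encoder *enc)
{
	/* UHBR rates are only meaningful on a DP2-capable encoder. */
	return enc->features.flags.bits.IS_DP2_CAPABLE &&
	       enc->features.flags.bits.IS_UHBR10_CAPABLE;
}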
*/ +}; + +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum dp2_link_mode { + DP2_LINK_TRAINING_TPS1, + DP2_LINK_TRAINING_TPS2, + DP2_LINK_ACTIVE, + DP2_TEST_PATTERN +}; + +enum dp2_phy_tp_select { + DP_DPHY_TP_SELECT_TPS1, + DP_DPHY_TP_SELECT_TPS2, + DP_DPHY_TP_SELECT_PRBS, + DP_DPHY_TP_SELECT_CUSTOM, + DP_DPHY_TP_SELECT_SQUARE +}; + +enum dp2_phy_tp_prbs { + DP_DPHY_TP_PRBS7, + DP_DPHY_TP_PRBS9, + DP_DPHY_TP_PRBS11, + DP_DPHY_TP_PRBS15, + DP_DPHY_TP_PRBS23, + DP_DPHY_TP_PRBS31 +}; + +struct hpo_dp_link_enc_state { + uint32_t link_enc_enabled; + uint32_t link_mode; + uint32_t lane_count; + uint32_t slot_count[4]; + uint32_t stream_src[4]; + uint32_t vc_rate_x[4]; + uint32_t vc_rate_y[4]; +}; + +struct hpo_dp_link_encoder { + const struct hpo_dp_link_encoder_funcs *funcs; + struct dc_context *ctx; + int inst; + enum engine_id preferred_engine; + enum transmitter transmitter; + enum hpd_source_id hpd_source; +}; + +struct hpo_dp_link_encoder_funcs { + + void (*enable_link_phy)(struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + enum transmitter transmitter); + + void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc, + enum signal_type signal); + + void (*link_enable)( + struct hpo_dp_link_encoder *enc, + enum dc_lane_count num_lanes); + + void (*link_disable)( + struct hpo_dp_link_encoder *enc); + + void (*set_link_test_pattern)( + struct hpo_dp_link_encoder *enc, + struct encoder_set_dp_phy_pattern_param *tp_params); + + void (*update_stream_allocation_table)( + struct hpo_dp_link_encoder *enc, + const struct link_mst_stream_allocation_table *table); + + void (*set_throttled_vcp_size)( + struct hpo_dp_link_encoder *enc, + uint32_t stream_encoder_inst, + struct fixed31_32 avg_time_slots_per_mtp); + + bool (*is_in_alt_mode) ( + struct hpo_dp_link_encoder *enc); + + void (*read_state)( + struct hpo_dp_link_encoder *enc, + struct hpo_dp_link_enc_state *state); + + void (*set_ffe)( + struct hpo_dp_link_encoder *enc, + const struct dc_link_settings *link_settings, + uint8_t ffe_preset); +}; +#endif + #endif /* LINK_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 640bb432bd6a..f5fd2a067323 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -281,6 +281,7 @@ struct mpc_funcs { struct mpcc* (*get_mpcc_for_dpp_from_secondary)( struct mpc_tree *tree, int dpp_id); + struct mpcc* (*get_mpcc_for_dpp)( struct mpc_tree *tree, int dpp_id); @@ -366,6 +367,7 @@ struct mpc_funcs { void (*set_bg_color)(struct mpc *mpc, struct tg_color *bg_color, int mpcc_id); + void (*set_mpc_mem_lp_mode)(struct mpc *mpc); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 564ea6a727b0..073f8b667eff 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -164,10 +164,16 @@ struct stream_encoder_funcs { void (*stop_dp_info_packets)( struct stream_encoder *enc); + void (*reset_fifo)( + struct stream_encoder *enc + ); + void (*dp_blank)( + struct dc_link *link, struct stream_encoder *enc); void (*dp_unblank)( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param); @@ -227,7 +233,8 @@ struct stream_encoder_funcs { void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps); + uint8_t *dsc_packed_pps, + bool immediate_update); 
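/* An illustrative call through the HPO (128b/132b) link encoder table
 * defined above: programming the throttled VC payload size from a fraction
 * of time slots. dc_fixpt_from_fraction() is dc's existing fixed31_32
 * constructor; everything else here is a simplified sketch.
 */
static void hpo_example_set_vcp_size(struct hpo_dp_link_encoder *enc,
				     uint32_t stream_enc_inst,
				     uint32_t slots_num, uint32_t slots_den)
{
	struct fixed31_32 avg_time_slots_per_mtp =
		dc_fixpt_from_fraction(slots_num, slots_den);

	if (enc->funcs->set_throttled_vcp_size)
		enc->funcs->set_throttled_vcp_size(enc, stream_enc_inst,
						   avg_time_slots_per_mtp);
}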
void (*set_dynamic_metadata)(struct stream_encoder *enc, bool enable, @@ -242,4 +249,86 @@ struct stream_encoder_funcs { struct stream_encoder *enc); }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct hpo_dp_stream_encoder_state { + uint32_t stream_enc_enabled; + uint32_t vid_stream_enabled; + uint32_t otg_inst; + uint32_t pixel_encoding; + uint32_t component_depth; + uint32_t compressed_format; + uint32_t sdp_enabled; + uint32_t mapped_to_link_enc; +}; + +struct hpo_dp_stream_encoder { + const struct hpo_dp_stream_encoder_funcs *funcs; + struct dc_context *ctx; + struct dc_bios *bp; + uint32_t inst; + enum engine_id id; + struct vpg *vpg; + struct apg *apg; +}; + +struct hpo_dp_stream_encoder_funcs { + void (*enable_stream)( + struct hpo_dp_stream_encoder *enc); + + void (*dp_unblank)( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_source); + + void (*dp_blank)( + struct hpo_dp_stream_encoder *enc); + + void (*disable)( + struct hpo_dp_stream_encoder *enc); + + void (*set_stream_attribute)( + struct hpo_dp_stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + enum dc_color_space output_color_space, + bool use_vsc_sdp_for_colorimetry, + bool compressed_format, + bool double_buffer_en); + + void (*update_dp_info_packets)( + struct hpo_dp_stream_encoder *enc, + const struct encoder_info_frame *info_frame); + + void (*stop_dp_info_packets)( + struct hpo_dp_stream_encoder *enc); + + void (*dp_set_dsc_pps_info_packet)( + struct hpo_dp_stream_encoder *enc, + bool enable, + uint8_t *dsc_packed_pps, + bool immediate_update); + + void (*map_stream_to_link)( + struct hpo_dp_stream_encoder *enc, + uint32_t stream_enc_inst, + uint32_t link_enc_inst); + + void (*audio_mute_control)( + struct hpo_dp_stream_encoder *enc, bool mute); + + void (*dp_audio_setup)( + struct hpo_dp_stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info); + + void (*dp_audio_enable)( + struct hpo_dp_stream_encoder *enc); + + void (*dp_audio_disable)( + struct hpo_dp_stream_encoder *enc); + + void (*read_state)( + struct hpo_dp_stream_encoder *enc, + struct hpo_dp_stream_encoder_state *state); +}; +#endif + #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 03f47f23fb65..c29320b3855d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -100,6 +100,9 @@ enum crc_selection { enum otg_out_mux_dest { OUT_MUX_DIO = 0, +#if defined(CONFIG_DRM_AMD_DC_DCN) + OUT_MUX_HPO_DP = 2, +#endif }; enum h_timing_div_mode { @@ -287,6 +290,8 @@ struct timing_generator_funcs { enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); + void (*get_dsc_status)(struct timing_generator *optc, + uint32_t *dsc_mode); void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index ad5f2adcc40d..d50f4bd06b5d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -236,7 +236,7 @@ struct hw_sequencer_funcs { const struct tg_color *solid_color, int width, int height, int offset); - void (*z10_restore)(struct dc *dc); + void (*z10_restore)(const struct dc *dc); void 
(*z10_save_init)(struct dc *dc); void (*update_visual_confirm_color)(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index f7f7e4fff0c2..c2008258c50a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -41,6 +41,9 @@ struct dce_hwseq_wa { bool DEGVIDCN10_254; bool DEGVIDCN21; bool disallow_self_refresh_during_multi_plane_transition; +#if defined(CONFIG_DRM_AMD_DC_DCN) + bool dp_hpo_and_otg_sequence; +#endif }; struct hwseq_wa_state { @@ -140,6 +143,7 @@ struct hwseq_private_funcs { const struct dc_plane_state *plane_state); void (*PLAT_58856_wa)(struct dc_state *context, struct pipe_ctx *pipe_ctx); + void (*setup_hpo_hw_control)(const struct dce_hwseq *hws, bool enable); }; struct dce_hwseq { @@ -151,6 +155,10 @@ struct dce_hwseq { struct hwseq_wa_state wa_state; struct hwseq_private_funcs funcs; + PHYSICAL_ADDRESS_LOC fb_base; + PHYSICAL_ADDRESS_LOC fb_top; + PHYSICAL_ADDRESS_LOC fb_offset; + PHYSICAL_ADDRESS_LOC uma_top; }; #endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index 883dd8733ea4..a4e43b4826e0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -36,7 +36,7 @@ * Initialise link encoder resource tracking. */ void link_enc_cfg_init( - struct dc *dc, + const struct dc *dc, struct dc_state *state); /* @@ -70,22 +70,36 @@ void link_enc_cfg_link_enc_unassign( * endpoint. */ bool link_enc_cfg_is_transmitter_mappable( - struct dc_state *state, + struct dc *dc, struct link_encoder *link_enc); +/* Return stream using DIG link encoder resource. NULL if unused. */ +struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc( + struct dc *dc, + enum engine_id eng_id); + /* Return link using DIG link encoder resource. NULL if unused. */ struct dc_link *link_enc_cfg_get_link_using_link_enc( - struct dc_state *state, + struct dc *dc, enum engine_id eng_id); /* Return DIG link encoder used by link. NULL if unused. */ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( - struct dc_state *state, + struct dc *dc, const struct dc_link *link); /* Return next available DIG link encoder. NULL if none available. */ -struct link_encoder *link_enc_cfg_get_next_avail_link_enc( - const struct dc *dc, - const struct dc_state *state); +struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc); + +/* Return DIG link encoder used by stream. NULL if unused. */ +struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( + struct dc *dc, + const struct dc_stream_state *stream); + +/* Return true if encoder available to use. */ +bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link); + +/* Returns true if encoder assignments in supplied state pass validity checks. 
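/* A small sketch (hypothetical caller) of the reworked dc-scoped accessors
 * above: with the explicit dc_state argument gone, lookups resolve against
 * the current or the to-be-committed state according to the internal
 * LINK_ENC_CFG_STEADY/LINK_ENC_CFG_TRANSIENT mode.
 */
static bool stream_has_dig_link_enc(struct dc *dc,
				    const struct dc_stream_state *stream)
{
	return link_enc_cfg_get_link_enc_used_by_stream(dc, stream) != NULL;
}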
*/ +bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state); #endif /* DC_INC_LINK_ENC_CFG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index fc1d289bb9fe..ba664bc49595 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -37,6 +37,7 @@ void dp_enable_link_phy( const struct dc_link_settings *link_settings); void dp_receiver_power_ctrl(struct dc_link *link, bool on); +void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode); void edp_add_delay_for_T9(struct dc_link *link); bool edp_receiver_ready_T9(struct dc_link *link); bool edp_receiver_ready_T7(struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index fe1e5833c96a..372c0898facd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -49,6 +49,11 @@ struct resource_caps { int num_vmid; int num_dsc; unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output). + unsigned int num_usb4_dpia; // Total number of USB4 DPIA (DisplayPort Input Adapters). +#if defined(CONFIG_DRM_AMD_DC_DCN) + int num_hpo_dp_stream_encoder; + int num_hpo_dp_link_encoder; +#endif int num_mpc_3dlut; }; @@ -68,6 +73,15 @@ struct resource_create_funcs { struct stream_encoder *(*create_stream_encoder)( enum engine_id eng_id, struct dc_context *ctx); +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)( + enum engine_id eng_id, struct dc_context *ctx); + + struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)( + uint8_t inst, + struct dc_context *ctx); +#endif + struct dce_hwseq *(*create_hwseq)( struct dc_context *ctx); }; @@ -187,4 +201,9 @@ int get_num_mpc_splits(struct pipe_ctx *pipe); int get_num_odm_splits(struct pipe_ctx *pipe); +#if defined(CONFIG_DRM_AMD_DC_DCN) +struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( + const struct resource_pool *pool); +#endif + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index 0d09181227c5..fd739aecf104 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -93,6 +93,16 @@ IRQ_DCN21 = irq_service_dcn21.o AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN21) + +############################################################################### +# DCN 201 +############################################################################### +IRQ_DCN201 = irq_service_dcn201.o + +AMD_DAL_IRQ_DCN201 = $(addprefix $(AMDDALPATH)/dc/irq/dcn201/,$(IRQ_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN201) + ############################################################################### # DCN 30 ############################################################################### diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index c4b067d01895..9ccafe007b23 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -132,6 +132,31 @@ enum dc_irq_source to_dal_irq_source_dcn20( } } +uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum 
dc_irq_source source) +{ + const struct irq_source_info *info; + uint32_t addr; + uint32_t value; + uint32_t current_status; + + info = find_irq_source_info(irq_service, source); + if (!info) + return 0; + + addr = info->status_reg; + if (!addr) + return 0; + + value = dm_read_reg(irq_service->ctx, addr); + current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE); + + return current_status; +} + static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h index aee4b37999f1..4d69ab24ca25 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h @@ -31,4 +31,6 @@ struct irq_service *dal_irq_service_dcn20_create( struct irq_service_init_data *init_data); +uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c new file mode 100644 index 000000000000..a47f68634fc3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -0,0 +1,374 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
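/* A usage sketch for the newly exported dc_get_hpd_state_dcn20() above:
 * polling raw HPD sense (nonzero = sink detected) for one HPD source. The
 * wrapper is illustrative only.
 */
static bool example_hpd_sense_dcn20(struct irq_service *irq_service,
				    enum dc_irq_source hpd_source)
{
	return dc_get_hpd_state_dcn20(irq_service, hpd_source) != 0;
}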
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "include/logger_interface.h" + +#include "../dce110/irq_service_dce110.h" + +#include "dcn/dcn_2_0_3_offset.h" +#include "dcn/dcn_2_0_3_sh_mask.h" + +#include "cyan_skillfish_ip_offset.h" +#include "soc15_hw_ip.h" + +#include "irq_service_dcn201.h" + +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" + +enum dc_irq_source to_dal_irq_source_dcn201( + struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) +{ + switch (src_id) { + case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK1; + case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK2; + case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC1_VLINE0; + case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: + return DC_IRQ_SOURCE_DC2_VLINE0; + case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP1; + case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP2; + case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE1; + case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE2; + case DCN_1_0__SRCID__DC_HPD1_INT: + /* generic src_id for all HPD and HPDRX interrupts */ + switch (ext_id) { + case DCN_1_0__CTXID__DC_HPD1_INT: + return DC_IRQ_SOURCE_HPD1; + case DCN_1_0__CTXID__DC_HPD2_INT: + return DC_IRQ_SOURCE_HPD2; + case DCN_1_0__CTXID__DC_HPD1_RX_INT: + return DC_IRQ_SOURCE_HPD1RX; + case DCN_1_0__CTXID__DC_HPD2_RX_INT: + return DC_IRQ_SOURCE_HPD2RX; + default: + return DC_IRQ_SOURCE_INVALID; + } + break; + + default: + return DC_IRQ_SOURCE_INVALID; + } + return DC_IRQ_SOURCE_INVALID; +} + +static bool hpd_ack( + struct irq_service *irq_service, + const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value( + value, + current_status ? 0 : 1, + HPD0_DC_HPD_INT_CONTROL, + DC_HPD_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} + +static const struct irq_source_info_funcs hpd_irq_info_funcs = { + .set = NULL, + .ack = hpd_ack +}; + +static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs pflip_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vblank_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vline0_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +#undef BASE_INNER +#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +/* compile time expand base address. 
*/ +#define BASE(seg) \ + BASE_INNER(seg) + +#define SRI(reg_name, block, id)\ + BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\ + .enable_reg = SRI(reg1, block, reg_num),\ + .enable_mask = \ + block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + .enable_value = {\ + block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ + },\ + .ack_reg = SRI(reg2, block, reg_num),\ + .ack_mask = \ + block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ + .ack_value = \ + block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \ + +#define hpd_int_entry(reg_num)\ + [DC_IRQ_SOURCE_HPD1 + reg_num] = {\ + IRQ_REG_ENTRY(HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\ + .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ + .funcs = &hpd_irq_info_funcs\ + } + +#define hpd_rx_int_entry(reg_num)\ + [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\ + IRQ_REG_ENTRY(HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\ + .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ + .funcs = &hpd_rx_irq_info_funcs\ + } +#define pflip_int_entry(reg_num)\ + [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\ + IRQ_REG_ENTRY(HUBPREQ, reg_num,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\ + .funcs = &pflip_irq_info_funcs\ + } + +#define vupdate_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ + .funcs = &vblank_irq_info_funcs\ + } + +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
+ */ +#define vupdate_no_lock_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ + .funcs = &vupdate_no_lock_irq_info_funcs\ + } +#define vblank_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ + .funcs = &vblank_irq_info_funcs\ + } + +#define vline0_int_entry(reg_num)\ + [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\ + OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\ + .funcs = &vline0_irq_info_funcs\ + } + +#define dummy_irq_entry() \ + {\ + .funcs = &dummy_irq_info_funcs\ + } + +#define i2c_int_entry(reg_num) \ + [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry() + +#define dp_sink_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry() + +#define gpio_pad_int_entry(reg_num) \ + [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry() + +#define dc_underflow_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry() + +static const struct irq_source_info_funcs dummy_irq_info_funcs = { + .set = dal_irq_service_dummy_set, + .ack = dal_irq_service_dummy_ack +}; + +static const struct irq_source_info +irq_source_info_dcn201[DAL_IRQ_SOURCES_NUMBER] = { + [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(), + hpd_int_entry(0), + hpd_int_entry(1), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + hpd_rx_int_entry(0), + hpd_rx_int_entry(1), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + i2c_int_entry(1), + i2c_int_entry(2), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dp_sink_int_entry(1), + dp_sink_int_entry(2), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(), + pflip_int_entry(0), + pflip_int_entry(1), + pflip_int_entry(2), + pflip_int_entry(3), + [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(), + [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(), + [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(), + gpio_pad_int_entry(0), + gpio_pad_int_entry(1), + gpio_pad_int_entry(2), + gpio_pad_int_entry(3), + gpio_pad_int_entry(4), + gpio_pad_int_entry(5), + gpio_pad_int_entry(6), + gpio_pad_int_entry(7), + gpio_pad_int_entry(8), + gpio_pad_int_entry(9), + gpio_pad_int_entry(10), + gpio_pad_int_entry(11), + gpio_pad_int_entry(12), + gpio_pad_int_entry(13), + gpio_pad_int_entry(14), + gpio_pad_int_entry(15), + gpio_pad_int_entry(16), + gpio_pad_int_entry(17), + gpio_pad_int_entry(18), + gpio_pad_int_entry(19), + gpio_pad_int_entry(20), + gpio_pad_int_entry(21), + gpio_pad_int_entry(22), + gpio_pad_int_entry(23), + gpio_pad_int_entry(24), + gpio_pad_int_entry(25), + gpio_pad_int_entry(26), + gpio_pad_int_entry(27), + gpio_pad_int_entry(28), + gpio_pad_int_entry(29), + gpio_pad_int_entry(30), + dc_underflow_int_entry(1), + dc_underflow_int_entry(2), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), + [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + 
vblank_int_entry(0), + vblank_int_entry(1), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + vline0_int_entry(0), + vline0_int_entry(1), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), + dummy_irq_entry(), +}; + +static const struct irq_service_funcs irq_service_funcs_dcn201 = { + .to_dal_irq_source = to_dal_irq_source_dcn201 +}; + +static void dcn201_irq_construct( + struct irq_service *irq_service, + struct irq_service_init_data *init_data) +{ + dal_irq_service_construct(irq_service, init_data); + + irq_service->info = irq_source_info_dcn201; + irq_service->funcs = &irq_service_funcs_dcn201; +} + +struct irq_service *dal_irq_service_dcn201_create( + struct irq_service_init_data *init_data) +{ + struct irq_service *irq_service = kzalloc(sizeof(*irq_service), + GFP_KERNEL); + + if (!irq_service) + return NULL; + + dcn201_irq_construct(irq_service, init_data); + return irq_service; +} diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h new file mode 100644 index 000000000000..8e27c5e219a3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h @@ -0,0 +1,34 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
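/* A minimal sketch of instantiating the DCN2.01 IRQ service created above,
 * mirroring how the other dal_irq_service_*_create() variants are used in
 * dc; the ctx plumbing is simplified and the wrapper name is hypothetical.
 */
static struct irq_service *example_dcn201_irq_create(struct dc_context *ctx)
{
	struct irq_service_init_data init_data = {
		.ctx = ctx,
	};

	return dal_irq_service_dcn201_create(&init_data);
}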
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_IRQ_SERVICE_DCN201_H__ +#define __DAL_IRQ_SERVICE_DCN201_H__ + +#include "../irq_service.h" + +struct irq_service *dal_irq_service_dcn201_create( + struct irq_service_init_data *init_data); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index ed54e1c819be..78940cb20e10 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -135,6 +135,31 @@ enum dc_irq_source to_dal_irq_source_dcn21( return DC_IRQ_SOURCE_INVALID; } +uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source) +{ + const struct irq_source_info *info; + uint32_t addr; + uint32_t value; + uint32_t current_status; + + info = find_irq_source_info(irq_service, source); + if (!info) + return 0; + + addr = info->status_reg; + if (!addr) + return 0; + + value = dm_read_reg(irq_service->ctx, addr); + current_status = + get_reg_field_value( + value, + HPD0_DC_HPD_INT_STATUS, + DC_HPD_SENSE); + + return current_status; +} + static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h index da2bd0e93d7a..616470e32380 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h @@ -31,4 +31,6 @@ struct irq_service *dal_irq_service_dcn21_create( struct irq_service_init_data *init_data); +uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index a2a4fbeb83f8..4db1133e4466 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service) *irq_service = NULL; } -static const struct irq_source_info *find_irq_source_info( +const struct irq_source_info *find_irq_source_info( struct irq_service *irq_service, enum dc_irq_source source) { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index dbfcb096eedd..e60b82480093 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -69,6 +69,10 @@ struct irq_service { const struct irq_service_funcs *funcs; }; +const struct irq_source_info *find_irq_source_info( + struct irq_service *irq_service, + enum dc_irq_source source); + void dal_irq_service_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data); diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index f50cae252de4..5df1d80c8341 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -31,10 +31,12 @@ #include <linux/kref.h> #include <linux/types.h> #include <linux/slab.h> +#include <linux/delay.h> #include <asm/byteorder.h> #include <drm/drm_print.h> +#include <drm/drm_dp_helper.h> #include "cgs_common.h" diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c index 1053b165c139..1e39aae6b1cf 100644 --- 
a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c @@ -69,9 +69,11 @@ static void virtual_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc) {} static void virtual_stream_encoder_dp_blank( + struct dc_link *link, struct stream_encoder *enc) {} static void virtual_stream_encoder_dp_unblank( + struct dc_link *link, struct stream_encoder *enc, const struct encoder_unblank_param *param) {} @@ -102,7 +104,8 @@ static void virtual_setup_stereo_sync( static void virtual_stream_encoder_set_dsc_pps_info_packet( struct stream_encoder *enc, bool enable, - uint8_t *dsc_packed_pps) + uint8_t *dsc_packed_pps, + bool immediate_update) {} static const struct stream_encoder_funcs virtual_str_enc_funcs = { diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index caf961bb633f..83855b8a32e9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -84,6 +84,7 @@ enum dmub_status { DMUB_STATUS_QUEUE_FULL, DMUB_STATUS_TIMEOUT, DMUB_STATUS_INVALID, + DMUB_STATUS_HW_FAILURE, }; /* enum dmub_asic - dmub asic identifier */ @@ -96,6 +97,7 @@ enum dmub_asic { DMUB_ASIC_DCN302, DMUB_ASIC_DCN303, DMUB_ASIC_DCN31, + DMUB_ASIC_DCN31B, DMUB_ASIC_MAX, }; @@ -118,6 +120,7 @@ enum dmub_notification_type { DMUB_NOTIFICATION_AUX_REPLY, DMUB_NOTIFICATION_HPD, DMUB_NOTIFICATION_HPD_IRQ, + DMUB_NOTIFICATION_SET_CONFIG_REPLY, DMUB_NOTIFICATION_MAX }; @@ -235,6 +238,9 @@ struct dmub_srv_hw_params { bool load_inst_const; bool skip_panel_power_sequence; bool disable_z10; + bool power_optimization; + bool dpia_supported; + bool disable_dpia; }; /** @@ -354,10 +360,14 @@ struct dmub_srv_hw_funcs { uint32_t (*get_gpint_dataout)(struct dmub_srv *dmub); + void (*clear_inbox0_ack_register)(struct dmub_srv *dmub); + uint32_t (*read_inbox0_ack_register)(struct dmub_srv *dmub); void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data); uint32_t (*get_current_time)(struct dmub_srv *dmub); void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca); + + bool (*should_detect)(struct dmub_srv *dmub); }; /** @@ -401,6 +411,7 @@ struct dmub_srv { struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs hw_funcs; struct dmub_rb inbox1_rb; + uint32_t inbox1_last_wptr; /** * outbox1_rb is accessed without locks (dal & dc) * and to be used only in dmub_srv_stat_get_notification() @@ -437,6 +448,7 @@ struct dmub_notification { union { struct aux_reply_data aux_reply; enum dp_hpd_status hpd_status; + enum set_config_status sc_status; }; }; @@ -724,6 +736,47 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +bool dmub_srv_should_detect(struct dmub_srv *dmub); + +/** + * dmub_srv_send_inbox0_cmd() - Send command to DMUB using INBOX0 + * @dmub: the dmub service + * @data: the data to be sent in the INBOX0 command + * + * Send command by writing directly to INBOX0 WPTR + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + */ +enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data); + +/** + * dmub_srv_wait_for_inbox0_ack() - wait for DMUB to ACK INBOX0 command + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Wait for 
DMUB to ACK the INBOX0 message + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + * DMUB_STATUS_TIMEOUT - wait for ack timed out + */ +enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us); + +/** + * dmub_srv_clear_inbox0_ack() - clear ACK register for INBOX0 + * @dmub: the dmub service + * + * Clear ACK register for INBOX0 + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + */ +enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub); + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7efe9ba8706e..7eec65090862 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc +#define DMUB_FW_VERSION_GIT_HASH 0x465e619a #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 79 +#define DMUB_FW_VERSION_REVISION 94 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -173,13 +173,6 @@ extern "C" { #endif /** - * Number of nanoseconds per DMUB tick. - * DMCUB_TIMER_CURRENT increments in DMUB ticks, which are 10ns by default. - * If DMCUB_TIMER_WINDOW is non-zero this will no longer be true. - */ -#define NS_PER_DMUB_TICK 10 - -/** * union dmub_addr - DMUB physical/virtual 64-bit address. */ union dmub_addr { @@ -208,10 +201,9 @@ union dmub_psr_debug_flags { uint32_t use_hw_lock_mgr : 1; /** - * Unused. - * TODO: Remove. + * Use TPS3 signal when restoring the main link. */ - uint32_t log_line_nums : 1; + uint32_t force_wakeup_by_tps3 : 1; } bitfields; /** @@ -368,10 +360,15 @@ union dmub_fw_boot_options { uint32_t disable_clk_gate: 1; /**< 1 if clock gating should be disabled */ uint32_t skip_phy_init_panel_sequence: 1; /**< 1 to skip panel init seq */ uint32_t z10_disable: 1; /**< 1 to disable z10 */ - uint32_t reserved2: 1; /**< reserved for an unreleased feature */ - uint32_t reserved_unreleased1: 1; /**< reserved for an unreleased feature */ + uint32_t enable_dpia: 1; /**< 1 if DPIA should be enabled */ uint32_t invalid_vbios_data: 1; /**< 1 if VBIOS data table is invalid */ - uint32_t reserved : 23; /**< reserved */ + uint32_t dpia_supported: 1; /**< 1 if DPIA is supported on this platform */ + uint32_t sel_mux_phy_c_d_phy_f_g: 1; /**< 1 if PHYF/PHYG should be enabled */ + /**< 1 if all root clock gating is enabled and low power memory is enabled*/ + uint32_t power_optimization: 1; + uint32_t diag_env: 1; /* 1 if diagnostic environment */ + + uint32_t reserved : 19; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -653,6 +650,10 @@ enum dmub_cmd_type { */ DMUB_CMD__PANEL_CNTL = 74, /** + * Command type used for interfacing with DPIA. + */ + DMUB_CMD__DPIA = 77, + /** * Command type used for EDID CEA parsing */ DMUB_CMD__EDID_CEA = 79, @@ -674,6 +675,21 @@ enum dmub_out_cmd_type { * Command type used for DP AUX Reply data notification */ DMUB_OUT_CMD__DP_AUX_REPLY = 1, + /** + * Command type used for DP HPD event notification + */ + DMUB_OUT_CMD__DP_HPD_NOTIFY = 2, + /** + * Command type used for SET_CONFIG Reply notification + */ + DMUB_OUT_CMD__SET_CONFIG_REPLY = 3, +}; + +/* DMUB_CMD__DPIA command sub-types.
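/* A hedged sketch of the INBOX0 handshake these three service calls are
 * designed for: clear any stale ACK, post the command, then poll for the
 * firmware ACK. The helper and the 100 ms timeout are illustrative.
 */
static enum dmub_status example_send_inbox0_sync(struct dmub_srv *dmub,
						 union dmub_inbox0_data_register data)
{
	enum dmub_status status;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK)
		return status;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK)
		return status;

	return dmub_srv_wait_for_inbox0_ack(dmub, 100000); /* 100 ms in us */
}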
*/ +enum dmub_cmd_dpia_type { + DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0, + DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1, + DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2, +}; + #pragma pack(push, 1) @@ -973,7 +989,7 @@ struct dmub_dig_transmitter_control_data_v1_7 { uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */ uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */ uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */ - uint8_t reserved0; /**< For future use */ + uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */ uint8_t reserved1; /**< For future use */ uint8_t reserved2[3]; /**< For future use */ uint32_t reserved3[11]; /**< For future use */ @@ -996,6 +1012,77 @@ struct dmub_rb_cmd_dig1_transmitter_control { }; /** + * DPIA tunnel command parameters. + */ +struct dmub_cmd_dig_dpia_control_data { + uint8_t enc_id; /** 0 = ENGINE_ID_DIGA, ... */ + uint8_t action; /** ATOM_TRANSMITER_ACTION_DISABLE/ENABLE/SETUP_VSEMPH */ + union { + uint8_t digmode; /** enum atom_encode_mode_def */ + uint8_t dplaneset; /** DP voltage swing and pre-emphasis value */ + } mode_laneset; + uint8_t lanenum; /** Lane number 1, 2, 4, 8 */ + uint32_t symclk_10khz; /** Symbol clock in 10 kHz */ + uint8_t hpdsel; /** =0: HPD is not assigned */ + uint8_t digfe_sel; /** DIG stream( front-end ) selection, bit0 - DIG0 FE */ + uint8_t dpia_id; /** Index of DPIA */ + uint8_t fec_rdy : 1; + uint8_t reserved : 7; + uint32_t reserved1; +}; + +/** + * DMUB command for DPIA tunnel control. + */ +struct dmub_rb_cmd_dig1_dpia_control { + struct dmub_cmd_header header; + struct dmub_cmd_dig_dpia_control_data dpia_control; +}; + +/** + * SET_CONFIG Command Payload + */ +struct set_config_cmd_payload { + uint8_t msg_type; /* set config message type */ + uint8_t msg_data; /* set config message data */ +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + */ +struct dmub_cmd_set_config_control_data { + struct set_config_cmd_payload cmd_pkt; + uint8_t instance; /* DPIA instance */ + uint8_t immed_status; /* Immediate status returned in case of error */ +}; + +/** + * DMUB command structure for SET_CONFIG command. + */ +struct dmub_rb_cmd_set_config_access { + struct dmub_cmd_header header; /* header */ + struct dmub_cmd_set_config_control_data set_config_control; /* set config data */ +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. + */ +struct dmub_cmd_mst_alloc_slots_control_data { + uint8_t mst_alloc_slots; /* mst slots to be allotted */ + uint8_t instance; /* DPIA instance */ + uint8_t immed_status; /* Immediate status returned as there is no outbox msg posted */ + uint8_t mst_slots_in_use; /* returns slots in use for error cases */ +}; + +/** + * DMUB command structure for the DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. + */ +struct dmub_rb_cmd_set_mst_alloc_slots { + struct dmub_cmd_header header; /* header */ + struct dmub_cmd_mst_alloc_slots_control_data mst_slots_control; /* mst slots control */ +}; + +/** * struct dmub_rb_cmd_dpphy_init - DPPHY init. */ struct dmub_rb_cmd_dpphy_init { @@ -1242,6 +1329,33 @@ struct dmub_rb_cmd_dp_hpd_notify { struct dp_hpd_data hpd_data; }; + +/** + * Definition of a SET_CONFIG reply from DPOA.
+ */ +enum set_config_status { + SET_CONFIG_PENDING = 0, + SET_CONFIG_ACK_RECEIVED, + SET_CONFIG_RX_TIMEOUT, + SET_CONFIG_UNKNOWN_ERROR, +}; + +/** + * Definition of a set_config reply + */ +struct set_config_reply_control_data { + uint8_t instance; /* DPIA Instance */ + uint8_t status; /* Set Config reply */ + uint16_t pad; /* Alignment */ +}; + +/** + * Definition of a DMUB_OUT_CMD__SET_CONFIG_REPLY command. + */ +struct dmub_rb_cmd_dp_set_config_reply { + struct dmub_cmd_header header; + struct set_config_reply_control_data set_config_reply_control; +}; + /* * Command IDs should be treated as stable ABI. * Do not reuse or modify IDs. @@ -1280,6 +1394,10 @@ enum dmub_cmd_psr_type { * Forces PSR enabled until an explicit PSR disable call. */ DMUB_CMD__PSR_FORCE_STATIC = 5, + /** + * Set PSR power option + */ + DMUB_CMD__SET_PSR_POWER_OPT = 7, }; /** @@ -1424,10 +1542,14 @@ struct dmub_cmd_psr_copy_settings_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; + /* + * DSC enable status in driver + */ + uint8_t dsc_enable_status; /** - * Explicit padding to 4 byte boundary. + * Explicit padding (3 bytes) to 4 byte boundary. */ - uint8_t pad3[4]; + uint8_t pad3[3]; }; /** @@ -1578,6 +1700,44 @@ struct dmub_rb_cmd_psr_force_static { }; /** + * Data passed from driver to FW in a DMUB_CMD__SET_PSR_POWER_OPT command. + */ +struct dmub_cmd_psr_set_power_opt_data { + /** + * PSR control version. + */ + uint8_t cmd_version; + /** + * Panel Instance. + * Panel instance to identify which psr_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; + /** + * PSR power option + */ + uint32_t power_opt; +}; + +/** + * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command. + */ +struct dmub_rb_cmd_psr_set_power_opt { + /** + * Command header. + */ + struct dmub_cmd_header header; + /** + * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command. + */ + struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data; +}; + +/** * Set of HW components that can be locked. * * Note: If updating with more HW components, fields @@ -1730,6 +1890,11 @@ enum dmub_cmd_abm_type { * Enable/disable fractional duty cycle for backlight PWM. */ DMUB_CMD__ABM_SET_PWM_FRAC = 5, + + /** + * Unregister vertical interrupt after steady state is reached + */ + DMUB_CMD__ABM_PAUSE = 6, }; /** @@ -2086,6 +2251,50 @@ struct dmub_rb_cmd_abm_init_config { }; /** + * Data passed from driver to FW in a DMUB_CMD__ABM_PAUSE command. + */ + +struct dmub_cmd_abm_pause_data { + + /** + * Panel Control HW instance mask. + * Bit 0 is Panel Control HW instance 0. + * Bit 1 is Panel Control HW instance 1. + */ + uint8_t panel_mask; + + /** + * OTG hw instance + */ + uint8_t otg_inst; + + /** + * Enable or disable ABM pause + */ + uint8_t enable; + + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[1]; +}; + +/** + * Definition of a DMUB_CMD__ABM_PAUSE command. + */ +struct dmub_rb_cmd_abm_pause { + /** + * Command header. + */ + struct dmub_cmd_header header; + + /** + * Data passed from driver to FW in a DMUB_CMD__ABM_PAUSE command. + */ + struct dmub_cmd_abm_pause_data abm_pause_data; +}; + +/** * Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command. */ struct dmub_cmd_query_feature_caps_data { @@ -2312,6 +2521,10 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_psr_force_static psr_force_static; /** + * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
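/* A sketch (modeled on the existing dmub command helpers) of filling out
 * the new DMUB_CMD__ABM_PAUSE command defined above. Queueing through
 * dc_dmub_srv is elided; the header field names are assumed to follow the
 * existing dmub_cmd_header layout, with DMUB_CMD__ABM as the parent type.
 */
static void example_fill_abm_pause(union dmub_rb_cmd *cmd, bool pause,
				   uint8_t panel_mask, uint8_t otg_inst)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->abm_pause.header.type = DMUB_CMD__ABM;
	cmd->abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE;
	cmd->abm_pause.header.payload_bytes =
		sizeof(struct dmub_cmd_abm_pause_data);
	cmd->abm_pause.abm_pause_data.panel_mask = panel_mask;
	cmd->abm_pause.abm_pause_data.otg_inst = otg_inst;
	cmd->abm_pause.abm_pause_data.enable = pause ? 1 : 0;
}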
+ */ + struct dmub_rb_cmd_psr_set_power_opt psr_set_power_opt; + /** * Definition of a DMUB_CMD__PLAT_54186_WA command. */ struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa; @@ -2364,6 +2577,11 @@ union dmub_rb_cmd { struct dmub_rb_cmd_abm_init_config abm_init_config; /** + * Definition of a DMUB_CMD__ABM_PAUSE command. + */ + struct dmub_rb_cmd_abm_pause abm_pause; + + /** * Definition of a DMUB_CMD__DP_AUX_ACCESS command. */ struct dmub_rb_cmd_dp_aux_access dp_aux_access; @@ -2383,6 +2601,18 @@ union dmub_rb_cmd { */ struct dmub_rb_cmd_lvtma_control lvtma_control; /** + * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command. + */ + struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control; + /** + * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. + */ + struct dmub_rb_cmd_set_config_access set_config_access; + /** + * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command. + */ + struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots; + /** * Definition of a DMUB_CMD__EDID_CEA command. */ struct dmub_rb_cmd_edid_cea edid_cea; @@ -2404,6 +2634,10 @@ union dmub_rb_out_cmd { * HPD notify command. */ struct dmub_rb_cmd_dp_hpd_notify dp_hpd_notify; + /** + * SET_CONFIG reply command. + */ + struct dmub_rb_cmd_dp_set_config_reply set_config_reply; }; #pragma pack(pop) @@ -2484,14 +2718,16 @@ static inline bool dmub_rb_full(struct dmub_rb *rb) static inline bool dmub_rb_push_front(struct dmub_rb *rb, const union dmub_rb_cmd *cmd) { - uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt; - const uint8_t *src = (const uint8_t *)cmd; + uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt); + const uint64_t *src = (const uint64_t *)cmd; + uint8_t i; if (dmub_rb_full(rb)) return false; // copying data - dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE); + for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) + *dst++ = *src++; rb->wrpt += DMUB_RB_CMD_SIZE; @@ -2600,14 +2836,16 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb, static inline bool dmub_rb_out_front(struct dmub_rb *rb, union dmub_rb_out_cmd *cmd) { - const uint8_t *src = (const uint8_t *)(rb->base_address) + rb->rptr; - uint8_t *dst = (uint8_t *)cmd; + const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr); + uint64_t *dst = (uint64_t *)cmd; + uint8_t i; if (dmub_rb_empty(rb)) return false; // copying data - dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE); + for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) + *dst++ = *src++; return true; } @@ -2642,14 +2880,17 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb) */ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) { - uint8_t buf[DMUB_RB_CMD_SIZE]; uint32_t rptr = rb->rptr; uint32_t wptr = rb->wrpt; while (rptr != wptr) { - const uint8_t *data = (const uint8_t *)rb->base_address + rptr; + uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr); + //uint64_t volatile *p = (uint64_t volatile *)data; + uint64_t temp; + uint8_t i; - dmub_memcpy(buf, data, DMUB_RB_CMD_SIZE); + for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++) + temp = *data++; rptr += DMUB_RB_CMD_SIZE; if (rptr >= rb->capacity) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index fc667cb17eb0..fa0569174aec 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -338,6 +338,11 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv 
*dmub, const struct dmu union dmub_fw_boot_options boot_options = {0}; boot_options.bits.z10_disable = params->disable_z10; + boot_options.bits.dpia_supported = params->dpia_supported; + boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1; + boot_options.bits.power_optimization = params->power_optimization; + + boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -432,3 +437,11 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; } + +bool dmub_dcn31_should_detect(struct dmub_srv *dmub) +{ + uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0); + bool should_detect = fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED; + return should_detect; +} + diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index bb62605d2ac8..59ddc81b5a0e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -245,4 +245,6 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub); void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data); +bool dmub_dcn31_should_detect(struct dmub_srv *dmub); + #endif /* _DMUB_DCN31_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 75a91cfaf036..f673a1c1777a 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -100,24 +100,9 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb) } static const struct dmub_fw_meta_info * -dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset) { const union dmub_fw_meta *meta; - const uint8_t *blob = NULL; - uint32_t blob_size = 0; - uint32_t meta_offset = 0; - - if (params->fw_bss_data && params->bss_data_size) { - /* Legacy metadata region. */ - blob = params->fw_bss_data; - blob_size = params->bss_data_size; - meta_offset = DMUB_FW_META_OFFSET; - } else if (params->fw_inst_const && params->inst_const_size) { - /* Combined metadata region. */ - blob = params->fw_inst_const; - blob_size = params->inst_const_size; - meta_offset = 0; - } if (!blob || !blob_size) return NULL; @@ -134,6 +119,32 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) return &meta->info; } +static const struct dmub_fw_meta_info * +dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +{ + const struct dmub_fw_meta_info *info = NULL; + + if (params->fw_bss_data && params->bss_data_size) { + /* Legacy metadata region. */ + info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data, + params->bss_data_size, + DMUB_FW_META_OFFSET); + } else if (params->fw_inst_const && params->inst_const_size) { + /* Combined metadata region - can be aligned to 16-bytes. 
 */
+		uint32_t i;
+
+		for (i = 0; i < 16; ++i) {
+			info = dmub_get_fw_meta_info_from_blob(
+				params->fw_inst_const, params->inst_const_size, i);
+
+			if (info)
+				break;
+		}
+	}
+
+	return info;
+}
+
 static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
 {
 	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
@@ -208,6 +219,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
 		break;

 	case DMUB_ASIC_DCN31:
+	case DMUB_ASIC_DCN31B:
 		dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
 		funcs->reset = dmub_dcn31_reset;
 		funcs->reset_release = dmub_dcn31_reset_release;
@@ -234,7 +246,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
 		funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;
 		funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
-
+		funcs->should_detect = dmub_dcn31_should_detect;
 		funcs->get_current_time = dmub_dcn31_get_current_time;

 		break;
@@ -597,6 +609,8 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
 enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
 {
+	struct dmub_rb flush_rb;
+
 	if (!dmub->hw_init)
 		return DMUB_STATUS_INVALID;
@@ -605,9 +619,14 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
 	 * been flushed to framebuffer memory. Otherwise DMCUB might
 	 * read back stale, fully invalid or partially invalid data.
 	 */
-	dmub_rb_flush_pending(&dmub->inbox1_rb);
+	flush_rb = dmub->inbox1_rb;
+	flush_rb.rptr = dmub->inbox1_last_wptr;
+	dmub_rb_flush_pending(&flush_rb);
+
+	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+
+	dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
-	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);

 	return DMUB_STATUS_OK;
 }
@@ -655,13 +674,19 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
 enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
 					uint32_t timeout_us)
 {
-	uint32_t i;
+	uint32_t i, rptr;

 	if (!dmub->hw_init)
 		return DMUB_STATUS_INVALID;

 	for (i = 0; i <= timeout_us; ++i) {
-		dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+		rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+		if (rptr > dmub->inbox1_rb.capacity)
+			return DMUB_STATUS_HW_FAILURE;
+
+		dmub->inbox1_rb.rptr = rptr;
+
 		if (dmub_rb_empty(&dmub->inbox1_rb))
 			return DMUB_STATUS_OK;
@@ -816,3 +841,46 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_
 	dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
 	return true;
 }
+
+bool dmub_srv_should_detect(struct dmub_srv *dmub)
+{
+	if (!dmub->hw_init || !dmub->hw_funcs.should_detect)
+		return false;
+
+	return dmub->hw_funcs.should_detect(dmub);
+}
+
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
+{
+	if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
+		return DMUB_STATUS_INVALID;
+
+	dmub->hw_funcs.clear_inbox0_ack_register(dmub);
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
+{
+	uint32_t i = 0;
+	uint32_t ack = 0;
+
+	if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
+		return DMUB_STATUS_INVALID;
+
+	for (i = 0; i <= timeout_us; i++) {
+		ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+		if (ack)
+			return DMUB_STATUS_OK;
+	}
+	return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
+		union dmub_inbox0_data_register data)
+{
+	if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
+		return DMUB_STATUS_INVALID;
+
+	dmub->hw_funcs.send_inbox0_cmd(dmub, data);
+	return DMUB_STATUS_OK;
+}
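Taken together, the three inbox0 helpers added at the end of this file implement a simple doorbell handshake: software clears the firmware's ACK bit, writes one dmub_inbox0_data_register word, then polls until firmware raises ACK again. A minimal caller sketch using only the functions above; the wrapper name and the 30-unit poll budget are illustrative, not part of this patch:

/* Illustrative inbox0 round trip; not part of this patch. */
static enum dmub_status dmub_inbox0_send_and_wait(struct dmub_srv *dmub,
		union dmub_inbox0_data_register data)
{
	enum dmub_status status;

	/* Clear ACK first so a stale ACK is not mistaken for a fresh one. */
	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK)
		return status;

	/* Write the command word into the inbox0 data register. */
	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK)
		return status;

	/* Busy-poll the ACK register, bounded by the caller's timeout. */
	return dmub_srv_wait_for_inbox0_ack(dmub, 30);
}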
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 70766d534c9c..44502ec919a2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -76,6 +76,22 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
 		dmub_memcpy((void *)&notify->aux_reply,
 			(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
 		break;
+	case DMUB_OUT_CMD__DP_HPD_NOTIFY:
+		if (cmd.dp_hpd_notify.hpd_data.hpd_type == DP_HPD) {
+			notify->type = DMUB_NOTIFICATION_HPD;
+			notify->hpd_status = cmd.dp_hpd_notify.hpd_data.hpd_status;
+		} else {
+			notify->type = DMUB_NOTIFICATION_HPD_IRQ;
+		}
+
+		notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+		notify->result = AUX_RET_SUCCESS;
+		break;
+	case DMUB_OUT_CMD__SET_CONFIG_REPLY:
+		notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
+		notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+		notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
+		break;
 	default:
 		notify->type = DMUB_NOTIFICATION_NO_DATA;
 		break;
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 76a87b682883..b8ffb216ebc4 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -152,6 +152,10 @@ struct bp_transmitter_control {
 	enum signal_type signal;
 	enum dc_color_depth color_depth; /* not used for DCE6.0 */
 	enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCe6.0 */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	enum tx_ffe_id txffe_sel; /* used for DCN3 */
+	enum engine_id hpo_engine_id; /* used for DCN3 */
+#endif
 	struct graphics_object_id connector_obj_id;
 	/* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should
 	 * be pixel clock * deep_color_ratio (in KHz)
@@ -319,6 +323,10 @@ struct bp_encoder_cap_info {
 	uint32_t DP_HBR2_EN:1;
 	uint32_t DP_HBR3_EN:1;
 	uint32_t HDMI_6GB_EN:1;
+	uint32_t IS_DP2_CAPABLE:1;
+	uint32_t DP_UHBR10_EN:1;
+	uint32_t DP_UHBR13_5_EN:1;
+	uint32_t DP_UHBR20_EN:1;
 	uint32_t DP_IS_USB_C:1;
 	uint32_t RESERVED:27;
 };
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 381c17caace1..e4a2dfacab4c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -211,6 +211,7 @@ enum {
 #ifndef ASICREV_IS_GREEN_SARDINE
 #define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
 #endif
+#define DEVICE_ID_NV_13FE 0x13FE  // CYAN_SKILLFISH
 #define FAMILY_VGH 144
 #define DEVICE_ID_VGH_163F 0x163F
 #define VANGOGH_A0 0x01
@@ -227,7 +228,7 @@ enum {
 #define FAMILY_YELLOW_CARP 146
 #define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x02 // TODO: DCN31 - update with correct B0 ID
+#define YELLOW_CARP_B0 0x20
 #define YELLOW_CARP_UNKNOWN 0xFF
 #ifndef ASICREV_IS_YELLOW_CARP
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fe75ec834892..012b7c61798c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -50,6 +50,7 @@ enum dce_version {
 	DCN_VERSION_1_0,
 	DCN_VERSION_1_01,
 	DCN_VERSION_2_0,
+	DCN_VERSION_2_01,
 	DCN_VERSION_2_1,
 	DCN_VERSION_3_0,
 	DCN_VERSION_3_01,
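The new cases above route firmware-initiated events (HPD, short-pulse HPD IRQ, and DPIA SET_CONFIG completions) through the same notification path as AUX replies. A consumer sketch, assuming the two-argument form of dmub_srv_stat_get_notification() and a pending_notification flag on struct dmub_notification (both assumptions, not shown in this hunk); the drain loop itself is illustrative:

/* Illustrative drain loop; not part of this patch. */
static void drain_dmub_notifications(struct dmub_srv *dmub)
{
	struct dmub_notification notify = {0};

	do {
		if (dmub_srv_stat_get_notification(dmub, &notify) != DMUB_STATUS_OK)
			break;

		switch (notify.type) {
		case DMUB_NOTIFICATION_HPD:
			/* long pulse: notify.hpd_status says plugged/unplugged */
			break;
		case DMUB_NOTIFICATION_HPD_IRQ:
			/* short pulse: only notify.link_index is meaningful */
			break;
		case DMUB_NOTIFICATION_SET_CONFIG_REPLY:
			/* DPIA SET_CONFIG result is in notify.sc_status */
			break;
		default:
			break;	/* DMUB_NOTIFICATION_NO_DATA and friends */
		}
	} while (notify.pending_notification);
}

diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h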
b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 4de59b66bb1a..a2b80514d83e 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -35,6 +35,7 @@ #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C #define DP_BRANCH_DEVICE_ID_006037 0x006037 +#define DP_DEVICE_ID_38EC11 0x38EC11 enum ddc_result { DDC_RESULT_UNKNOWN = 0, DDC_RESULT_SUCESSFULL, @@ -117,4 +118,7 @@ struct av_sync_data { uint8_t aud_del_ins3;/* DPCD 0002Dh */ }; +static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; +static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; + #endif /* __DAL_DDC_SERVICE_TYPES_H__ */ diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index aec7389aff37..ffd0df1701e6 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -80,6 +80,15 @@ enum dpcd_phy_test_patterns { PHY_TEST_PATTERN_CP2520_1, PHY_TEST_PATTERN_CP2520_2, PHY_TEST_PATTERN_CP2520_3, /* same as TPS4 */ + PHY_TEST_PATTERN_128b_132b_TPS1 = 0x8, + PHY_TEST_PATTERN_128b_132b_TPS2 = 0x10, + PHY_TEST_PATTERN_PRBS9 = 0x18, + PHY_TEST_PATTERN_PRBS11 = 0x20, + PHY_TEST_PATTERN_PRBS15 = 0x28, + PHY_TEST_PATTERN_PRBS23 = 0x30, + PHY_TEST_PATTERN_PRBS31 = 0x38, + PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40, + PHY_TEST_PATTERN_SQUARE_PULSE = 0x48, }; enum dpcd_test_dyn_range { @@ -135,7 +144,14 @@ enum dpcd_training_patterns { DPCD_TRAINING_PATTERN_1, DPCD_TRAINING_PATTERN_2, DPCD_TRAINING_PATTERN_3, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DPCD_TRAINING_PATTERN_4 = 7, + DPCD_128b_132b_TPS1 = 1, + DPCD_128b_132b_TPS2 = 2, + DPCD_128b_132b_TPS2_CDS = 3, +#else DPCD_TRAINING_PATTERN_4 = 7 +#endif }; /* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus @@ -149,6 +165,7 @@ enum dpcd_psr_sink_states { PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7, }; +#define DP_SOURCE_SEQUENCE 0x30c #define DP_SOURCE_TABLE_REVISION 0x310 #define DP_SOURCE_PAYLOAD_SIZE 0x311 #define DP_SOURCE_SINK_CAP 0x317 diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index 792652236c61..dd974c428d23 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -328,6 +328,7 @@ struct integrated_info { uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID]; uint8_t checksum; + uint8_t fixdpvoltageswing; } ext_disp_conn_info; /* exiting long long time */ struct available_s_clk_list { diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h index 58bb42ed85ca..84b299ff500a 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h @@ -140,6 +140,18 @@ enum sync_source { SYNC_SOURCE_DUAL_GPU_PIN }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +enum tx_ffe_id { + TX_FFE0 = 0, + TX_FFE1, + TX_FFE2, + TX_FFE3, + TX_FFE_DeEmphasis_Only, + TX_FFE_PreShoot_Only, + TX_FFE_No_FFE, +}; +#endif + /* connector sizes in millimeters - from BiosParserTypes.hpp */ #define CONNECTOR_SIZE_DVI 40 #define CONNECTOR_SIZE_VGA 32 diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index 33b3d755fe65..01775417cf4b 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ 
b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -184,6 +184,14 @@ enum engine_id { ENGINE_ID_DACA, ENGINE_ID_DACB, ENGINE_ID_VCE, /* wireless display pseudo-encoder */ +#if defined(CONFIG_DRM_AMD_DC_DCN) + ENGINE_ID_HPO_0, + ENGINE_ID_HPO_1, + ENGINE_ID_HPO_DP_0, + ENGINE_ID_HPO_DP_1, + ENGINE_ID_HPO_DP_2, + ENGINE_ID_HPO_DP_3, +#endif ENGINE_ID_VIRTUAL, ENGINE_ID_COUNT, diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h index c7fbb9c3ad6b..418fbf8c5c3a 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h @@ -41,6 +41,8 @@ struct aux_payload { * reset it to read data */ bool write; bool mot; + bool write_status_update; + uint32_t address; uint32_t length; uint8_t *data; @@ -53,6 +55,7 @@ struct aux_payload { * zero means "use default value" */ uint32_t defer_delay; + }; struct aux_command { diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 32f5274ed34e..424bccd36434 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -53,7 +53,11 @@ enum edp_revision { }; enum { - LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/ + LINK_RATE_REF_FREQ_IN_KHZ = 27000, /*27MHz*/ + BITS_PER_DP_BYTE = 10, + DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */ + DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */ + DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */ }; enum link_training_result { @@ -70,6 +74,12 @@ enum link_training_result { LINK_TRAINING_LINK_LOSS, /* Abort link training (because sink unplugged) */ LINK_TRAINING_ABORT, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_128b_132b_LT_FAILED, + DP_128b_132b_MAX_LOOP_COUNT_REACHED, + DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT, + DP_128b_132b_CDS_DONE_TIMEOUT, +#endif }; enum lttpr_mode { @@ -80,21 +90,58 @@ enum lttpr_mode { struct link_training_settings { struct dc_link_settings link_settings; - struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; + /* TODO: turn lane settings below into mandatory fields + * as initial lane configuration + */ + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; enum dc_voltage_swing *voltage_swing; enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; bool should_set_fec_ready; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - factor lane_settings out because it changes during LT */ + union dc_dp_ffe_preset *ffe_preset; +#endif uint16_t cr_pattern_time; uint16_t eq_pattern_time; + uint16_t cds_pattern_time; enum dc_dp_training_pattern pattern_for_cr; enum dc_dp_training_pattern pattern_for_eq; +#if defined(CONFIG_DRM_AMD_DC_DCN) + enum dc_dp_training_pattern pattern_for_cds; + + uint32_t eq_wait_time_limit; + uint8_t eq_loop_count_limit; + uint32_t cds_wait_time_limit; +#endif bool enhanced_framing; - bool allow_invalid_msa_timing_param; enum lttpr_mode lttpr_mode; + + /* disallow different lanes to have different lane settings */ + bool disallow_per_lane_settings; + /* dpcd lane settings will always use the same hw lane settings + * even if it doesn't match requested lane adjust */ + bool always_match_dpcd_with_hw_lane_settings; + + /***************************************************************** + * training states - parameters that can change in link training + 
*****************************************************************/ + /* TODO: Move hw_lane_settings and dpcd_lane_settings + * along with lane adjust, lane align, offset and all + * other training states into a new structure called + * training states, so link_training_settings becomes + * a constant input pre-decided prior to link training. + * + * The goal is to strictly decouple link training settings + * decision making process from link training states to + * prevent it from messy code practice of changing training + * decision on the fly. + */ + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX]; + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]; }; /*TODO: Move this enum test harness*/ @@ -114,13 +161,30 @@ enum dp_test_pattern { DP_TEST_PATTERN_CP2520_2, DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2, DP_TEST_PATTERN_CP2520_3, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_TEST_PATTERN_128b_132b_TPS1, + DP_TEST_PATTERN_128b_132b_TPS2, + DP_TEST_PATTERN_PRBS9, + DP_TEST_PATTERN_PRBS11, + DP_TEST_PATTERN_PRBS15, + DP_TEST_PATTERN_PRBS23, + DP_TEST_PATTERN_PRBS31, + DP_TEST_PATTERN_264BIT_CUSTOM, + DP_TEST_PATTERN_SQUARE_PULSE, +#endif /* Link Training Patterns */ DP_TEST_PATTERN_TRAINING_PATTERN1, DP_TEST_PATTERN_TRAINING_PATTERN2, DP_TEST_PATTERN_TRAINING_PATTERN3, DP_TEST_PATTERN_TRAINING_PATTERN4, +#if defined(CONFIG_DRM_AMD_DC_DCN) + DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE, + DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE, + DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE, +#else DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4, +#endif /* link test patterns*/ DP_TEST_PATTERN_COLOR_SQUARES, @@ -152,6 +216,22 @@ enum dp_panel_mode { DP_PANEL_MODE_SPECIAL }; +enum dpcd_source_sequence { + DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG = 1, /*done in apply_single_controller_ctx_to_hw */ + DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR, /*done in core_link_enable_stream */ + DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME, /*done in core_link_enable_stream/dcn20_enable_stream */ + DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE, /*done in perform_link_training_with_retries/dcn20_enable_stream */ + DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY, /*done in dp_enable_link_phy */ + DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN, /*done in dp_set_hw_test_pattern */ + DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM, /*done in dce110_enable_audio_stream */ + DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_unblank */ + DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_blank */ + DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET, /*done in enc1_stream_encoder_dp_blank */ + DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM, /*done in dce110_disable_audio_stream */ + DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY, /*done in dp_disable_link_phy */ + DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE, /*done in dce110_disable_stream */ +}; + /* DPCD_ADDR_TRAINING_LANEx_SET registers value */ union dpcd_training_lane_set { struct { diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 571fcf23cea9..f093b49c5e6e 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -72,6 +72,7 @@ #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_DP2(...) 
DRM_DEBUG_KMS(__VA_ARGS__) struct dal_logger; @@ -123,6 +124,7 @@ enum dc_log_type { LOG_MAX_HW_POINTS, LOG_ALL_TF_CHANNELS, LOG_SAMPLE_1DLUT, + LOG_DP2, LOG_SECTION_TOTAL_COUNT }; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index ef742d95ef05..64a38f08f497 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -54,12 +54,17 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2]; * just multiply with 2^gamma which can be computed once, and save the result so we * recursively compute all the values. */ - /*sRGB 709 2.2 2.4 P3*/ -static const int32_t gamma_numerator01[] = { 31308, 180000, 0, 0, 0}; -static const int32_t gamma_numerator02[] = { 12920, 4500, 0, 0, 0}; -static const int32_t gamma_numerator03[] = { 55, 99, 0, 0, 0}; -static const int32_t gamma_numerator04[] = { 55, 99, 0, 0, 0}; -static const int32_t gamma_numerator05[] = { 2400, 2200, 2200, 2400, 2600}; + +/* + * Regamma coefficients are used for both regamma and degamma. Degamma + * coefficients are calculated in our formula using the regamma coefficients. + */ + /*sRGB 709 2.2 2.4 P3*/ +static const int32_t numerator01[] = { 31308, 180000, 0, 0, 0}; +static const int32_t numerator02[] = { 12920, 4500, 0, 0, 0}; +static const int32_t numerator03[] = { 55, 99, 0, 0, 0}; +static const int32_t numerator04[] = { 55, 99, 0, 0, 0}; +static const int32_t numerator05[] = { 2400, 2200, 2200, 2400, 2600}; /* one-time setup of X points */ void setup_x_points_distribution(void) @@ -288,7 +293,8 @@ struct dividers { }; -static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_transfer_func_predefined type) +static bool build_coefficients(struct gamma_coefficients *coefficients, + enum dc_transfer_func_predefined type) { uint32_t i = 0; @@ -312,15 +318,15 @@ static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_ do { coefficients->a0[i] = dc_fixpt_from_fraction( - gamma_numerator01[index], 10000000); + numerator01[index], 10000000); coefficients->a1[i] = dc_fixpt_from_fraction( - gamma_numerator02[index], 1000); + numerator02[index], 1000); coefficients->a2[i] = dc_fixpt_from_fraction( - gamma_numerator03[index], 1000); + numerator03[index], 1000); coefficients->a3[i] = dc_fixpt_from_fraction( - gamma_numerator04[index], 1000); + numerator04[index], 1000); coefficients->user_gamma[i] = dc_fixpt_from_fraction( - gamma_numerator05[index], 1000); + numerator05[index], 1000); ++i; } while (i != ARRAY_SIZE(coefficients->a0)); @@ -1685,7 +1691,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma struct pwl_float_data_ex *rgb = rgb_regamma; const struct hw_x_point *coord_x = coordinates_x; - build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB); + build_coefficients(&coeff, true); i = 0; while (i != hw_points_num + 1) { diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index b99aa232bd8b..bd1d1dc93629 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -155,9 +155,18 @@ static unsigned int calc_v_total_from_duration( if (duration_in_us > vrr->max_duration_in_us) duration_in_us = vrr->max_duration_in_us; - v_total = div64_u64(div64_u64(((unsigned long long)( - duration_in_us) * (stream->timing.pix_clk_100hz / 10)), - stream->timing.h_total), 1000); + 
if (dc_is_hdmi_signal(stream->signal)) {
+		uint32_t h_total_up_scaled;
+
+		h_total_up_scaled = stream->timing.h_total * 10000;
+		v_total = div_u64((unsigned long long)duration_in_us
+				* stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
+				h_total_up_scaled);
+	} else {
+		v_total = div64_u64(div64_u64(((unsigned long long)(
+				duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
+				stream->timing.h_total), 1000);
+	}

 	/* v_total cannot be less than nominal */
 	if (v_total < stream->timing.v_total) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index e9bd84ec027d..be61975f1470 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -105,6 +105,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
 	dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;

 	psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+	mutex_unlock(&psp->dtm_context.mutex);

 	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
 		status = remove_display_from_topology_v2(hdcp, index);
@@ -115,8 +116,6 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
 		HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
 	}

-	mutex_unlock(&psp->dtm_context.mutex);
-
 	return status;
 }
@@ -205,6 +204,7 @@ static enum mod_hdcp_status add_display_to_topology_v3(
 	dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap =
 		link->hdcp_supported_informational;

 	psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+	mutex_unlock(&psp->dtm_context.mutex);

 	if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
 		status = add_display_to_topology_v2(hdcp, display);
@@ -214,8 +214,6 @@ static enum mod_hdcp_status add_display_to_topology_v3(
 		HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
 	}

-	mutex_unlock(&psp->dtm_context.mutex);
-
 	return status;
 }
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f37101f5a777..6d648c889866 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -249,6 +249,8 @@ struct mod_hdcp_link {
 	uint8_t ddc_line;
 	uint8_t link_enc_idx;
 	uint8_t phy_idx;
+	uint8_t dio_output_type;
+	uint8_t dio_output_id;
 	uint8_t hdcp_supported_informational;
 	union {
 		struct mod_hdcp_displayport dp;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 257f280d3d53..4b9e68a79f06 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -98,7 +98,8 @@ enum amd_ip_block_type {
 	AMD_IP_BLOCK_TYPE_ACP,
 	AMD_IP_BLOCK_TYPE_VCN,
 	AMD_IP_BLOCK_TYPE_MES,
-	AMD_IP_BLOCK_TYPE_JPEG
+	AMD_IP_BLOCK_TYPE_JPEG,
+	AMD_IP_BLOCK_TYPE_NUM,
 };

 enum amd_clockgating_state {
@@ -228,7 +229,7 @@ enum DC_FEATURE_MASK {
 	DC_FBC_MASK = (1 << 0), //0x1, disabled by default
 	DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
 	DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
-	DC_PSR_MASK = (1 << 3), //0x8, disabled by default
+	DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
 	DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
 };

@@ -236,7 +237,8 @@ enum DC_DEBUG_MASK {
 	DC_DISABLE_PIPE_SPLIT = 0x1,
 	DC_DISABLE_STUTTER = 0x2,
 	DC_DISABLE_DSC = 0x4,
-	DC_DISABLE_CLOCK_GATING = 0x8
+	DC_DISABLE_CLOCK_GATING = 0x8,
+	DC_DISABLE_PSR = 0x10,
 };

 enum amd_dpm_forced_level;
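The HDMI branch added to calc_v_total_from_duration() earlier in this series rounds up rather than truncating: v_total = ceil(duration_in_us * pix_clk_100hz / (h_total * 10000)), using the usual add-(divisor-1) idiom. A standalone arithmetic check with made-up but plausible timing values (a ~60 Hz frame at 148.5 MHz, h_total 2200):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative inputs, not taken from the patch */
	uint64_t duration_in_us = 16666;		/* ~60 Hz frame duration */
	uint64_t pix_clk_100hz = 1485000;		/* 148.5 MHz in 100 Hz units */
	uint64_t h_total_up_scaled = 2200 * 10000;	/* h_total scaled to match units */

	/* ceil(a / b) == (a + b - 1) / b for positive integers */
	uint64_t v_total = (duration_in_us * pix_clk_100hz + (h_total_up_scaled - 1))
			/ h_total_up_scaled;

	printf("v_total = %llu\n", (unsigned long long)v_total);	/* prints 1125 */
	return 0;
}

Truncating division would yield 1124 here, i.e. a slightly shorter frame than requested; rounding up keeps the refresh duration at or above the requested one, which matters for HDMI fixed-rate signaling.

diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h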
b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h new file mode 100755 index 000000000000..c56ca9740933 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + + +#ifndef _clk_11_0_1_OFFSET_HEADER +#define _clk_11_0_1_OFFSET_HEADER + +#define mmCLK4_0_CLK4_CLK_PLL_REQ 0x460e +#define mmCLK4_0_CLK4_CLK_PLL_REQ_BASE_IDX 0 + +#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT 0x467f +#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT_BASE_IDX 0 + +#endif
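The new clk_11_0_1_offset.h above follows the usual SOC15 header convention: each register gets a dword offset (mm<REG>) plus a segment index (mm<REG>_BASE_IDX) into a per-IP table of base addresses, so one header can serve parts whose register blocks are mapped at different bases. A minimal sketch of the resolution step; the clk_base table contents are an illustrative stand-in for the bases the driver discovers at probe time, not values from this patch:

#include <stdint.h>

#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT			0x467f
#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT_BASE_IDX		0

/* hypothetical per-IP segment bases, normally filled in at probe time */
static const uint32_t clk_base[] = { 0x00016C00 };

static uint32_t clk2_current_cnt_reg(void)
{
	/* final dword offset = segment base + register offset */
	return clk_base[mmCLK4_0_CLK4_CLK2_CURRENT_CNT_BASE_IDX]
		+ mmCLK4_0_CLK4_CLK2_CURRENT_CNT;
}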
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h new file mode 100755 index 000000000000..168fbf9fcd48 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _clk_11_0_1_SH_MASK_HEADER +#define _clk_11_0_1_SH_MASK_HEADER + +//CLK4_0_CLK4_CLK_PLL_REQ +#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int__SHIFT 0x0 +#define CLK4_0_CLK4_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc +#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10 +#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL +#define CLK4_0_CLK4_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L +#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L + +//CLK4_0_CLK4_CLK2_CURRENT_CNT +#define CLK4_0_CLK4_CLK2_CURRENT_CNT__CURRENT_COUNT__SHIFT 0x0 +#define CLK4_0_CLK4_CLK2_CURRENT_CNT__CURRENT_COUNT_MASK 0xFFFFFFFFL + +#endif
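The companion sh_mask header above pairs each register field with a shift and a mask. A field-decode sketch for CLK4_CLK_PLL_REQ, mirroring how a clock manager would derive the PLL output from the feedback multiplier; the raw register value and reference clock passed in are assumed inputs, and the VCO formula is the standard integer-plus-16-bit-fraction reading of FbMult:

#include <stdint.h>

#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int__SHIFT	0x0
#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac__SHIFT	0x10
#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int_MASK	0x000001FFL
#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac_MASK	0xFFFF0000L

/* VCO = refclk * (FbMult_int + FbMult_frac / 2^16) */
static uint32_t vco_freq_khz(uint32_t pll_req, uint32_t refclk_khz)
{
	uint32_t fb_int = (pll_req & CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int_MASK)
			>> CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int__SHIFT;
	uint32_t fb_frac = (pll_req & CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac_MASK)
			>> CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac__SHIFT;

	return refclk_khz * fb_int
		+ (uint32_t)(((uint64_t)refclk_khz * fb_frac) >> 16);
}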
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h new file mode 100755 index 000000000000..cae1a7e74323 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h @@ -0,0 +1,6193 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _dcn_2_0_3_OFFSET_HEADER +#define _dcn_2_0_3_OFFSET_HEADER + + +// addressBlock: dce_dc_dccg_dccg_dispdec +// base address: 0x0 +#define mmPHYPLLA_PIXCLK_RESYNC_CNTL 0x0040 +#define mmPHYPLLA_PIXCLK_RESYNC_CNTL_BASE_IDX 1 +#define mmPHYPLLB_PIXCLK_RESYNC_CNTL 0x0041 +#define mmPHYPLLB_PIXCLK_RESYNC_CNTL_BASE_IDX 1 +#define mmDP_DTO_DBUF_EN 0x0044 +#define mmDP_DTO_DBUF_EN_BASE_IDX 1 +#define mmDPREFCLK_CGTT_BLK_CTRL_REG 0x0048 +#define mmDPREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1 +#define mmREFCLK_CNTL 0x0049 +#define mmREFCLK_CNTL_BASE_IDX 1 +#define mmREFCLK_CGTT_BLK_CTRL_REG 0x004b +#define mmREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1 +#define mmDCCG_PERFMON_CNTL2 0x004e +#define mmDCCG_PERFMON_CNTL2_BASE_IDX 1 +#define mmDCCG_DS_DTO_INCR 0x0053 +#define mmDCCG_DS_DTO_INCR_BASE_IDX 1 +#define mmDCCG_DS_DTO_MODULO 0x0054 +#define mmDCCG_DS_DTO_MODULO_BASE_IDX 1 +#define mmDCCG_DS_CNTL 0x0055 +#define mmDCCG_DS_CNTL_BASE_IDX 1 +#define mmDCCG_DS_HW_CAL_INTERVAL 0x0056 +#define mmDCCG_DS_HW_CAL_INTERVAL_BASE_IDX 1 +#define mmDPREFCLK_CNTL 0x0058 +#define mmDPREFCLK_CNTL_BASE_IDX 1 +#define mmDCE_VERSION 0x005e +#define mmDCE_VERSION_BASE_IDX 1 +#define mmDCCG_GTC_CNTL 0x0060 +#define mmDCCG_GTC_CNTL_BASE_IDX 1 +#define mmDCCG_GTC_DTO_INCR 0x0061 +#define mmDCCG_GTC_DTO_INCR_BASE_IDX 1 +#define mmDCCG_GTC_DTO_MODULO 0x0062 +#define mmDCCG_GTC_DTO_MODULO_BASE_IDX 1 +#define mmDCCG_GTC_CURRENT 0x0063 +#define mmDCCG_GTC_CURRENT_BASE_IDX 1 +#define mmDSCCLK0_DTO_PARAM 0x006c +#define mmDSCCLK0_DTO_PARAM_BASE_IDX 1 +#define mmMILLISECOND_TIME_BASE_DIV 0x0070 +#define mmMILLISECOND_TIME_BASE_DIV_BASE_IDX 1 +#define mmDISPCLK_FREQ_CHANGE_CNTL 0x0071 +#define mmDISPCLK_FREQ_CHANGE_CNTL_BASE_IDX 1 +#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL 0x0072 +#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 1 +#define mmDCCG_PERFMON_CNTL 0x0073 +#define mmDCCG_PERFMON_CNTL_BASE_IDX 1 +#define mmDCCG_GATE_DISABLE_CNTL 0x0074 +#define mmDCCG_GATE_DISABLE_CNTL_BASE_IDX 1 +#define mmDISPCLK_CGTT_BLK_CTRL_REG 0x0075 +#define mmDISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1 +#define 
mmSOCCLK_CGTT_BLK_CTRL_REG 0x0076 +#define mmSOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1 +#define mmDCCG_CAC_STATUS 0x0077 +#define mmDCCG_CAC_STATUS_BASE_IDX 1 +#define mmMICROSECOND_TIME_BASE_DIV 0x007b +#define mmMICROSECOND_TIME_BASE_DIV_BASE_IDX 1 +#define mmDCCG_GATE_DISABLE_CNTL2 0x007c +#define mmDCCG_GATE_DISABLE_CNTL2_BASE_IDX 1 +#define mmSYMCLK_CGTT_BLK_CTRL_REG 0x007d +#define mmSYMCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1 +#define mmDCCG_DISP_CNTL_REG 0x007f +#define mmDCCG_DISP_CNTL_REG_BASE_IDX 1 +#define mmOTG0_PIXEL_RATE_CNTL 0x0080 +#define mmOTG0_PIXEL_RATE_CNTL_BASE_IDX 1 +#define mmDP_DTO0_PHASE 0x0081 +#define mmDP_DTO0_PHASE_BASE_IDX 1 +#define mmDP_DTO0_MODULO 0x0082 +#define mmDP_DTO0_MODULO_BASE_IDX 1 +#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL 0x0083 +#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1 +#define mmOTG1_PIXEL_RATE_CNTL 0x0084 +#define mmOTG1_PIXEL_RATE_CNTL_BASE_IDX 1 +#define mmDP_DTO1_PHASE 0x0085 +#define mmDP_DTO1_PHASE_BASE_IDX 1 +#define mmDP_DTO1_MODULO 0x0086 +#define mmDP_DTO1_MODULO_BASE_IDX 1 +#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL 0x0087 +#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1 +#define mmDPPCLK_CGTT_BLK_CTRL_REG 0x0098 +#define mmDPPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1 +#define mmDPPCLK0_DTO_PARAM 0x0099 +#define mmDPPCLK0_DTO_PARAM_BASE_IDX 1 +#define mmDPPCLK1_DTO_PARAM 0x009a +#define mmDPPCLK1_DTO_PARAM_BASE_IDX 1 +#define mmDPPCLK2_DTO_PARAM 0x009b +#define mmDPPCLK2_DTO_PARAM_BASE_IDX 1 +#define mmDPPCLK3_DTO_PARAM 0x009c +#define mmDPPCLK3_DTO_PARAM_BASE_IDX 1 +#define mmDCCG_CAC_STATUS2 0x009f +#define mmDCCG_CAC_STATUS2_BASE_IDX 1 +#define mmSYMCLKA_CLOCK_ENABLE 0x00a0 +#define mmSYMCLKA_CLOCK_ENABLE_BASE_IDX 1 +#define mmSYMCLKB_CLOCK_ENABLE 0x00a1 +#define mmSYMCLKB_CLOCK_ENABLE_BASE_IDX 1 +#define mmDCCG_SOFT_RESET 0x00a6 +#define mmDCCG_SOFT_RESET_BASE_IDX 1 +#define mmDSCCLK_DTO_CTRL 0x00a7 +#define mmDSCCLK_DTO_CTRL_BASE_IDX 1 +#define mmDCCG_AUDIO_DTO_SOURCE 0x00ab +#define mmDCCG_AUDIO_DTO_SOURCE_BASE_IDX 1 +#define mmDCCG_AUDIO_DTO0_PHASE 0x00ac +#define mmDCCG_AUDIO_DTO0_PHASE_BASE_IDX 1 +#define mmDCCG_AUDIO_DTO0_MODULE 0x00ad +#define mmDCCG_AUDIO_DTO0_MODULE_BASE_IDX 1 +#define mmDCCG_AUDIO_DTO1_PHASE 0x00ae +#define mmDCCG_AUDIO_DTO1_PHASE_BASE_IDX 1 +#define mmDCCG_AUDIO_DTO1_MODULE 0x00af +#define mmDCCG_AUDIO_DTO1_MODULE_BASE_IDX 1 +#define mmDCCG_VSYNC_OTG0_LATCH_VALUE 0x00b0 +#define mmDCCG_VSYNC_OTG0_LATCH_VALUE_BASE_IDX 1 +#define mmDCCG_VSYNC_OTG1_LATCH_VALUE 0x00b1 +#define mmDCCG_VSYNC_OTG1_LATCH_VALUE_BASE_IDX 1 +#define mmDCCG_VSYNC_OTG2_LATCH_VALUE 0x00b2 +#define mmDCCG_VSYNC_OTG2_LATCH_VALUE_BASE_IDX 1 +#define mmDCCG_VSYNC_OTG3_LATCH_VALUE 0x00b3 +#define mmDCCG_VSYNC_OTG3_LATCH_VALUE_BASE_IDX 1 +#define mmDCCG_VSYNC_OTG4_LATCH_VALUE 0x00b4 +#define mmDCCG_VSYNC_OTG4_LATCH_VALUE_BASE_IDX 1 +#define mmDCCG_VSYNC_OTG5_LATCH_VALUE 0x00b5 +#define mmDCCG_VSYNC_OTG5_LATCH_VALUE_BASE_IDX 1 +#define mmDPPCLK_DTO_CTRL 0x00b6 +#define mmDPPCLK_DTO_CTRL_BASE_IDX 1 +#define mmDCCG_VSYNC_CNT_CTRL 0x00b8 +#define mmDCCG_VSYNC_CNT_CTRL_BASE_IDX 1 +#define mmDCCG_VSYNC_CNT_INT_CTRL 0x00b9 +#define mmDCCG_VSYNC_CNT_INT_CTRL_BASE_IDX 1 + + +// addressBlock: dce_dc_dccg_dccg_dfs_dispdec +// base address: 0x0 +#define mmDENTIST_DISPCLK_CNTL 0x0064 +#define mmDENTIST_DISPCLK_CNTL_BASE_IDX 1 + + +// addressBlock: dce_dc_dmu_rbbmif_dispdec +// base address: 0x0 +#define mmRBBMIF_TIMEOUT 0x005b +#define mmRBBMIF_TIMEOUT_BASE_IDX 2 +#define mmRBBMIF_STATUS 0x005c +#define mmRBBMIF_STATUS_BASE_IDX 2 +#define mmRBBMIF_STATUS_2 0x005d +#define 
mmRBBMIF_STATUS_2_BASE_IDX 2 +#define mmRBBMIF_INT_STATUS 0x005e +#define mmRBBMIF_INT_STATUS_BASE_IDX 2 +#define mmRBBMIF_TIMEOUT_DIS 0x005f +#define mmRBBMIF_TIMEOUT_DIS_BASE_IDX 2 +#define mmRBBMIF_TIMEOUT_DIS_2 0x0060 +#define mmRBBMIF_TIMEOUT_DIS_2_BASE_IDX 2 +#define mmRBBMIF_STATUS_FLAG 0x0061 +#define mmRBBMIF_STATUS_FLAG_BASE_IDX 2 + +// addressBlock: dce_dc_hda_az_misc_dispdec +// base address: 0x0 +#define mmAZ_CLOCK_CNTL 0x0372 +#define mmAZ_CLOCK_CNTL_BASE_IDX 2 + + +// addressBlock: dce_dc_hda_azf0endpoint0_dispdec +// base address: 0x0 +#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0386 +#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2 +#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0387 +#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2 + + +// addressBlock: dce_dc_hda_azf0endpoint1_dispdec +// base address: 0x18 +#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x038c +#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2 +#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x038d +#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2 + + +// addressBlock: dce_dc_hda_azf0controller_dispdec +// base address: 0x0 +#define mmAZALIA_CONTROLLER_CLOCK_GATING 0x03c2 +#define mmAZALIA_CONTROLLER_CLOCK_GATING_BASE_IDX 2 +#define mmAZALIA_AUDIO_DTO 0x03c3 +#define mmAZALIA_AUDIO_DTO_BASE_IDX 2 +#define mmAZALIA_AUDIO_DTO_CONTROL 0x03c4 +#define mmAZALIA_AUDIO_DTO_CONTROL_BASE_IDX 2 +#define mmAZALIA_SOCCLK_CONTROL 0x03c5 +#define mmAZALIA_SOCCLK_CONTROL_BASE_IDX 2 +#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE 0x03c6 +#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE_BASE_IDX 2 +#define mmAZALIA_DATA_DMA_CONTROL 0x03c7 +#define mmAZALIA_DATA_DMA_CONTROL_BASE_IDX 2 +#define mmAZALIA_BDL_DMA_CONTROL 0x03c8 +#define mmAZALIA_BDL_DMA_CONTROL_BASE_IDX 2 +#define mmAZALIA_RIRB_AND_DP_CONTROL 0x03c9 +#define mmAZALIA_RIRB_AND_DP_CONTROL_BASE_IDX 2 +#define mmAZALIA_CORB_DMA_CONTROL 0x03ca +#define mmAZALIA_CORB_DMA_CONTROL_BASE_IDX 2 +#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER 0x03d1 +#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER_BASE_IDX 2 +#define mmAZALIA_CYCLIC_BUFFER_SYNC 0x03d2 +#define mmAZALIA_CYCLIC_BUFFER_SYNC_BASE_IDX 2 +#define mmAZALIA_GLOBAL_CAPABILITIES 0x03d3 +#define mmAZALIA_GLOBAL_CAPABILITIES_BASE_IDX 2 +#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x03d4 +#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 2 +#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x03d5 +#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_BASE_IDX 2 +#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY 0x03d6 +#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC0_CONTROL0 0x03d9 +#define mmAZALIA_INPUT_CRC0_CONTROL0_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC0_CONTROL1 0x03da +#define mmAZALIA_INPUT_CRC0_CONTROL1_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC0_CONTROL2 0x03db +#define mmAZALIA_INPUT_CRC0_CONTROL2_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC0_CONTROL3 0x03dc +#define mmAZALIA_INPUT_CRC0_CONTROL3_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC0_RESULT 0x03dd +#define mmAZALIA_INPUT_CRC0_RESULT_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC1_CONTROL0 0x03de +#define mmAZALIA_INPUT_CRC1_CONTROL0_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC1_CONTROL1 0x03df +#define mmAZALIA_INPUT_CRC1_CONTROL1_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC1_CONTROL2 0x03e0 +#define mmAZALIA_INPUT_CRC1_CONTROL2_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC1_CONTROL3 0x03e1 +#define mmAZALIA_INPUT_CRC1_CONTROL3_BASE_IDX 2 +#define mmAZALIA_INPUT_CRC1_RESULT 
0x03e2 +#define mmAZALIA_INPUT_CRC1_RESULT_BASE_IDX 2 +#define mmAZALIA_MEM_PWR_CTRL 0x03ee +#define mmAZALIA_MEM_PWR_CTRL_BASE_IDX 2 +#define mmAZALIA_MEM_PWR_STATUS 0x03ef +#define mmAZALIA_MEM_PWR_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_hda_azf0root_dispdec +// base address: 0x0 +#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0406 +#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x0407 +#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x0408 +#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x0409 +#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x040a +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x040b +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x040c +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x040d +#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x040e +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x040f +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x0410 +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_BASE_IDX 2 +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x0411 +#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_BASE_IDX 2 +#define mmREG_DC_AUDIO_PORT_CONNECTIVITY 0x041c +#define mmREG_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2 +#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x041d +#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2 + +// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec +// base address: 0x0 +#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043a +#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2 +#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043b +#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2 + + +// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec +// base address: 0x10 +#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043e +#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2 +#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043f +#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2 + + +// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec +// base address: 0x0 +#define mmDCHUBBUB_RET_PATH_DCC_CFG 0x04cf +#define mmDCHUBBUB_RET_PATH_DCC_CFG_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0 0x04d0 +#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1 0x04d1 +#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0 0x04d2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1 0x04d3 +#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0 0x04d4 +#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0_BASE_IDX 2 
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1 0x04d5 +#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0 0x04d6 +#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1 0x04d7 +#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0 0x04d8 +#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1 0x04d9 +#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0 0x04da +#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1 0x04db +#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0 0x04dc +#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1 0x04dd +#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0 0x04de +#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1 0x04df +#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL 0x04ef +#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL_BASE_IDX 2 +#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS 0x04f0 +#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS_BASE_IDX 2 +#define mmDCHUBBUB_CRC_CTRL 0x04f1 +#define mmDCHUBBUB_CRC_CTRL_BASE_IDX 2 +#define mmDCHUBBUB_CRC0_VAL_R_G 0x04f2 +#define mmDCHUBBUB_CRC0_VAL_R_G_BASE_IDX 2 +#define mmDCHUBBUB_CRC0_VAL_B_A 0x04f3 +#define mmDCHUBBUB_CRC0_VAL_B_A_BASE_IDX 2 +#define mmDCHUBBUB_CRC1_VAL_R_G 0x04f4 +#define mmDCHUBBUB_CRC1_VAL_R_G_BASE_IDX 2 +#define mmDCHUBBUB_CRC1_VAL_B_A 0x04f5 +#define mmDCHUBBUB_CRC1_VAL_B_A_BASE_IDX 2 + +// addressBlock: dce_dc_dchubbub_hubbub_dispdec +// base address: 0x0 +#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND 0x0505 +#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND_BASE_IDX 2 +#define mmDCHUBBUB_ARB_SAT_LEVEL 0x0506 +#define mmDCHUBBUB_ARB_SAT_LEVEL_BASE_IDX 2 +#define mmDCHUBBUB_ARB_QOS_FORCE 0x0507 +#define mmDCHUBBUB_ARB_QOS_FORCE_BASE_IDX 2 +#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL 0x0508 +#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL_BASE_IDX 2 +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A 0x0509 +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_BASE_IDX 2 +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A 0x050a +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A_BASE_IDX 2 +#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A 0x050d +#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_BASE_IDX 2 +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B 0x050e +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_BASE_IDX 2 +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B 0x050f +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B_BASE_IDX 2 +#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B 0x0512 +#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_BASE_IDX 2 +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C 0x0513 +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_BASE_IDX 2 +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C 0x0514 +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C_BASE_IDX 2 +#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C 0x0517 +#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_BASE_IDX 2 +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D 0x0518 +#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_BASE_IDX 2 +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D 0x0519 +#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D_BASE_IDX 2 +#define 
mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D 0x051c +#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_BASE_IDX 2 +#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL 0x051d +#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_BASE_IDX 2 +#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE 0x051e +#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE_BASE_IDX 2 +#define mmDCHUBBUB_GLOBAL_TIMER_CNTL 0x051f +#define mmDCHUBBUB_GLOBAL_TIMER_CNTL_BASE_IDX 2 +#define mmVTG0_CONTROL 0x0528 +#define mmVTG0_CONTROL_BASE_IDX 2 +#define mmVTG1_CONTROL 0x0529 +#define mmVTG1_CONTROL_BASE_IDX 2 +#define mmDCHUBBUB_SOFT_RESET 0x052e +#define mmDCHUBBUB_SOFT_RESET_BASE_IDX 2 +#define mmDCHUBBUB_CLOCK_CNTL 0x052f +#define mmDCHUBBUB_CLOCK_CNTL_BASE_IDX 2 +#define mmDCFCLK_CNTL 0x0530 +#define mmDCFCLK_CNTL_BASE_IDX 2 +#define mmDCHUBBUB_VLINE_SNAPSHOT 0x0533 +#define mmDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2 +#define mmDCHUBBUB_CTRL_STATUS 0x0534 +#define mmDCHUBBUB_CTRL_STATUS_BASE_IDX 2 +#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1 0x053a +#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1_BASE_IDX 2 +#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2 0x053b +#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2_BASE_IDX 2 +#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS 0x053c +#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2 +#define mmDCHUBBUB_TEST_DEBUG_INDEX 0x053d +#define mmDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmDCHUBBUB_TEST_DEBUG_DATA 0x053e +#define mmDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2 +#define mmFMON_CTRL 0x0548 +#define mmFMON_CTRL_BASE_IDX 2 + + + +// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec +// base address: 0x0 +#define mmHUBP0_DCSURF_SURFACE_CONFIG 0x05e5 +#define mmHUBP0_DCSURF_SURFACE_CONFIG_BASE_IDX 2 +#define mmHUBP0_DCSURF_ADDR_CONFIG 0x05e6 +#define mmHUBP0_DCSURF_ADDR_CONFIG_BASE_IDX 2 +#define mmHUBP0_DCSURF_TILING_CONFIG 0x05e7 +#define mmHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2 +#define mmHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9 +#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2 +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05ea +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05eb +#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x05ec +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP0_DCSURF_SEC_VIEWPORT_START 0x05ed +#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2 +#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION 0x05ee +#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C 0x05ef +#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x05f0 +#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG 0x05f1 +#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2 +#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C 0x05f2 +#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2 +#define mmHUBP0_DCHUBP_CNTL 0x05f3 +#define mmHUBP0_DCHUBP_CNTL_BASE_IDX 2 +#define mmHUBP0_HUBP_CLK_CNTL 0x05f4 +#define mmHUBP0_HUBP_CLK_CNTL_BASE_IDX 2 +#define mmHUBP0_HUBPREQ_DEBUG_DB 0x05f6 +#define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP0_HUBPREQ_DEBUG 0x05f7 +#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2 +#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x05fb +#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 +#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x05fc +#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2 + + +// addressBlock: 
dce_dc_dcbubp0_dispdec_hubpreq_dispdec +// base address: 0x0 +#define mmHUBPREQ0_DCSURF_SURFACE_PITCH 0x0607 +#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C 0x0608 +#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS 0x060a +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x060b +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x060c +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x060d +#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS 0x060e +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x060f +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x0610 +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x0611 +#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x0612 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x0613 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x0614 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x0615 +#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x0616 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x0617 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x0618 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x0619 +#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL 0x061a +#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_FLIP_CONTROL 0x061b +#define mmHUBPREQ0_DCSURF_FLIP_CONTROL_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2 0x061c +#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT 0x0620 +#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE 0x0621 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH 0x0622 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C 0x0623 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C 0x0624 +#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE 0x0625 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0626 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C 
0x0627 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0628 +#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ0_DCN_EXPANSION_MODE 0x062c +#define mmHUBPREQ0_DCN_EXPANSION_MODE_BASE_IDX 2 +#define mmHUBPREQ0_DCN_TTU_QOS_WM 0x062d +#define mmHUBPREQ0_DCN_TTU_QOS_WM_BASE_IDX 2 +#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL 0x062e +#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2 +#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0 0x062f +#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1 0x0630 +#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0 0x0631 +#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1 0x0632 +#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0 0x0633 +#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1 0x0634 +#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ0_BLANK_OFFSET_0 0x0646 +#define mmHUBPREQ0_BLANK_OFFSET_0_BASE_IDX 2 +#define mmHUBPREQ0_BLANK_OFFSET_1 0x0647 +#define mmHUBPREQ0_BLANK_OFFSET_1_BASE_IDX 2 +#define mmHUBPREQ0_DST_DIMENSIONS 0x0648 +#define mmHUBPREQ0_DST_DIMENSIONS_BASE_IDX 2 +#define mmHUBPREQ0_DST_AFTER_SCALER 0x0649 +#define mmHUBPREQ0_DST_AFTER_SCALER_BASE_IDX 2 +#define mmHUBPREQ0_PREFETCH_SETTINGS 0x064a +#define mmHUBPREQ0_PREFETCH_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ0_PREFETCH_SETTINGS_C 0x064b +#define mmHUBPREQ0_PREFETCH_SETTINGS_C_BASE_IDX 2 +#define mmHUBPREQ0_VBLANK_PARAMETERS_0 0x064c +#define mmHUBPREQ0_VBLANK_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ0_VBLANK_PARAMETERS_1 0x064d +#define mmHUBPREQ0_VBLANK_PARAMETERS_1_BASE_IDX 2 +#define mmHUBPREQ0_VBLANK_PARAMETERS_2 0x064e +#define mmHUBPREQ0_VBLANK_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ0_VBLANK_PARAMETERS_3 0x064f +#define mmHUBPREQ0_VBLANK_PARAMETERS_3_BASE_IDX 2 +#define mmHUBPREQ0_VBLANK_PARAMETERS_4 0x0650 +#define mmHUBPREQ0_VBLANK_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ0_FLIP_PARAMETERS_0 0x0651 +#define mmHUBPREQ0_FLIP_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ0_FLIP_PARAMETERS_2 0x0653 +#define mmHUBPREQ0_FLIP_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ0_NOM_PARAMETERS_4 0x0658 +#define mmHUBPREQ0_NOM_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ0_NOM_PARAMETERS_5 0x0659 +#define mmHUBPREQ0_NOM_PARAMETERS_5_BASE_IDX 2 +#define mmHUBPREQ0_NOM_PARAMETERS_6 0x065a +#define mmHUBPREQ0_NOM_PARAMETERS_6_BASE_IDX 2 +#define mmHUBPREQ0_NOM_PARAMETERS_7 0x065b +#define mmHUBPREQ0_NOM_PARAMETERS_7_BASE_IDX 2 +#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE 0x065c +#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE_BASE_IDX 2 +#define mmHUBPREQ0_PER_LINE_DELIVERY 0x065d +#define mmHUBPREQ0_PER_LINE_DELIVERY_BASE_IDX 2 +#define mmHUBPREQ0_CURSOR_SETTINGS 0x065e +#define mmHUBPREQ0_CURSOR_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ 0x065f +#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2 +#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT 0x0660 +#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2 +#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL 0x0661 +#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS 0x0662 +#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec +// base address: 0x0 +#define mmHUBPRET0_HUBPRET_CONTROL 0x066a +#define mmHUBPRET0_HUBPRET_CONTROL_BASE_IDX 2 +#define 
mmHUBPRET0_HUBPRET_MEM_PWR_CTRL 0x066b +#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS 0x066c +#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0 0x066d +#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1 0x066e +#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_READ_LINE0 0x066f +#define mmHUBPRET0_HUBPRET_READ_LINE0_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_READ_LINE1 0x0670 +#define mmHUBPRET0_HUBPRET_READ_LINE1_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_INTERRUPT 0x0671 +#define mmHUBPRET0_HUBPRET_INTERRUPT_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE 0x0672 +#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE_BASE_IDX 2 +#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS 0x0673 +#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec +// base address: 0x0 +#define mmCURSOR0_0_CURSOR_CONTROL 0x0678 +#define mmCURSOR0_0_CURSOR_CONTROL_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS 0x0679 +#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH 0x067a +#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_SIZE 0x067b +#define mmCURSOR0_0_CURSOR_SIZE_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_POSITION 0x067c +#define mmCURSOR0_0_CURSOR_POSITION_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_HOT_SPOT 0x067d +#define mmCURSOR0_0_CURSOR_HOT_SPOT_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_STEREO_CONTROL 0x067e +#define mmCURSOR0_0_CURSOR_STEREO_CONTROL_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_DST_OFFSET 0x067f +#define mmCURSOR0_0_CURSOR_DST_OFFSET_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL 0x0680 +#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL_BASE_IDX 2 +#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS 0x0681 +#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH 0x0682 +#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_0_DMDATA_ADDRESS_LOW 0x0683 +#define mmCURSOR0_0_DMDATA_ADDRESS_LOW_BASE_IDX 2 +#define mmCURSOR0_0_DMDATA_CNTL 0x0684 +#define mmCURSOR0_0_DMDATA_CNTL_BASE_IDX 2 +#define mmCURSOR0_0_DMDATA_QOS_CNTL 0x0685 +#define mmCURSOR0_0_DMDATA_QOS_CNTL_BASE_IDX 2 +#define mmCURSOR0_0_DMDATA_STATUS 0x0686 +#define mmCURSOR0_0_DMDATA_STATUS_BASE_IDX 2 +#define mmCURSOR0_0_DMDATA_SW_CNTL 0x0687 +#define mmCURSOR0_0_DMDATA_SW_CNTL_BASE_IDX 2 +#define mmCURSOR0_0_DMDATA_SW_DATA 0x0688 +#define mmCURSOR0_0_DMDATA_SW_DATA_BASE_IDX 2 + + + +// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec +// base address: 0x370 +#define mmHUBP1_DCSURF_SURFACE_CONFIG 0x06c1 +#define mmHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2 +#define mmHUBP1_DCSURF_ADDR_CONFIG 0x06c2 +#define mmHUBP1_DCSURF_ADDR_CONFIG_BASE_IDX 2 +#define mmHUBP1_DCSURF_TILING_CONFIG 0x06c3 +#define mmHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c6 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c7 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x06c8 +#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP1_DCSURF_SEC_VIEWPORT_START 0x06c9 +#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2 +#define 
mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION 0x06ca +#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C 0x06cb +#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x06cc +#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG 0x06cd +#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2 +#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C 0x06ce +#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2 +#define mmHUBP1_DCHUBP_CNTL 0x06cf +#define mmHUBP1_DCHUBP_CNTL_BASE_IDX 2 +#define mmHUBP1_HUBP_CLK_CNTL 0x06d0 +#define mmHUBP1_HUBP_CLK_CNTL_BASE_IDX 2 +#define mmHUBP1_HUBPREQ_DEBUG_DB 0x06d2 +#define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP1_HUBPREQ_DEBUG 0x06d3 +#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2 +#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06d7 +#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 +#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06d8 +#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec +// base address: 0x370 +#define mmHUBPREQ1_DCSURF_SURFACE_PITCH 0x06e3 +#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C 0x06e4 +#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS 0x06e6 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x06e7 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x06e8 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x06e9 +#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS 0x06ea +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x06eb +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x06ec +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x06ed +#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x06ee +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x06ef +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x06f0 +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x06f1 +#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x06f2 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x06f3 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x06f4 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x06f5 +#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL 0x06f6 
+#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_FLIP_CONTROL 0x06f7 +#define mmHUBPREQ1_DCSURF_FLIP_CONTROL_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2 0x06f8 +#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT 0x06fc +#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE 0x06fd +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH 0x06fe +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C 0x06ff +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C 0x0700 +#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE 0x0701 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0702 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0703 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0704 +#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ1_DCN_EXPANSION_MODE 0x0708 +#define mmHUBPREQ1_DCN_EXPANSION_MODE_BASE_IDX 2 +#define mmHUBPREQ1_DCN_TTU_QOS_WM 0x0709 +#define mmHUBPREQ1_DCN_TTU_QOS_WM_BASE_IDX 2 +#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL 0x070a +#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2 +#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0 0x070b +#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1 0x070c +#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0 0x070d +#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1 0x070e +#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0 0x070f +#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1 0x0710 +#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0 0x0711 +#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1 0x0712 +#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ1_BLANK_OFFSET_0 0x0722 +#define mmHUBPREQ1_BLANK_OFFSET_0_BASE_IDX 2 +#define mmHUBPREQ1_BLANK_OFFSET_1 0x0723 +#define mmHUBPREQ1_BLANK_OFFSET_1_BASE_IDX 2 +#define mmHUBPREQ1_DST_DIMENSIONS 0x0724 +#define mmHUBPREQ1_DST_DIMENSIONS_BASE_IDX 2 +#define mmHUBPREQ1_DST_AFTER_SCALER 0x0725 +#define mmHUBPREQ1_DST_AFTER_SCALER_BASE_IDX 2 +#define mmHUBPREQ1_PREFETCH_SETTINGS 0x0726 +#define mmHUBPREQ1_PREFETCH_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ1_PREFETCH_SETTINGS_C 0x0727 +#define mmHUBPREQ1_PREFETCH_SETTINGS_C_BASE_IDX 2 +#define mmHUBPREQ1_VBLANK_PARAMETERS_0 0x0728 +#define mmHUBPREQ1_VBLANK_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ1_VBLANK_PARAMETERS_1 0x0729 +#define mmHUBPREQ1_VBLANK_PARAMETERS_1_BASE_IDX 2 +#define mmHUBPREQ1_VBLANK_PARAMETERS_2 0x072a +#define mmHUBPREQ1_VBLANK_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ1_VBLANK_PARAMETERS_3 0x072b +#define mmHUBPREQ1_VBLANK_PARAMETERS_3_BASE_IDX 2 +#define mmHUBPREQ1_VBLANK_PARAMETERS_4 0x072c +#define mmHUBPREQ1_VBLANK_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ1_FLIP_PARAMETERS_0 0x072d +#define mmHUBPREQ1_FLIP_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ1_FLIP_PARAMETERS_2 0x072f +#define 
mmHUBPREQ1_FLIP_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ1_NOM_PARAMETERS_4 0x0734 +#define mmHUBPREQ1_NOM_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ1_NOM_PARAMETERS_5 0x0735 +#define mmHUBPREQ1_NOM_PARAMETERS_5_BASE_IDX 2 +#define mmHUBPREQ1_NOM_PARAMETERS_6 0x0736 +#define mmHUBPREQ1_NOM_PARAMETERS_6_BASE_IDX 2 +#define mmHUBPREQ1_NOM_PARAMETERS_7 0x0737 +#define mmHUBPREQ1_NOM_PARAMETERS_7_BASE_IDX 2 +#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE 0x0738 +#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE_BASE_IDX 2 +#define mmHUBPREQ1_PER_LINE_DELIVERY 0x0739 +#define mmHUBPREQ1_PER_LINE_DELIVERY_BASE_IDX 2 +#define mmHUBPREQ1_CURSOR_SETTINGS 0x073a +#define mmHUBPREQ1_CURSOR_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ 0x073b +#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2 +#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT 0x073c +#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2 +#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL 0x073d +#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS 0x073e +#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec +// base address: 0x370 +#define mmHUBPRET1_HUBPRET_CONTROL 0x0746 +#define mmHUBPRET1_HUBPRET_CONTROL_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL 0x0747 +#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS 0x0748 +#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0 0x0749 +#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1 0x074a +#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_READ_LINE0 0x074b +#define mmHUBPRET1_HUBPRET_READ_LINE0_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_READ_LINE1 0x074c +#define mmHUBPRET1_HUBPRET_READ_LINE1_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_INTERRUPT 0x074d +#define mmHUBPRET1_HUBPRET_INTERRUPT_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE 0x074e +#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE_BASE_IDX 2 +#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS 0x074f +#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec +// base address: 0x370 +#define mmCURSOR0_1_CURSOR_CONTROL 0x0754 +#define mmCURSOR0_1_CURSOR_CONTROL_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS 0x0755 +#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH 0x0756 +#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_SIZE 0x0757 +#define mmCURSOR0_1_CURSOR_SIZE_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_POSITION 0x0758 +#define mmCURSOR0_1_CURSOR_POSITION_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_HOT_SPOT 0x0759 +#define mmCURSOR0_1_CURSOR_HOT_SPOT_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_STEREO_CONTROL 0x075a +#define mmCURSOR0_1_CURSOR_STEREO_CONTROL_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_DST_OFFSET 0x075b +#define mmCURSOR0_1_CURSOR_DST_OFFSET_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL 0x075c +#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL_BASE_IDX 2 +#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS 0x075d +#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH 0x075e +#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_1_DMDATA_ADDRESS_LOW 0x075f +#define mmCURSOR0_1_DMDATA_ADDRESS_LOW_BASE_IDX 2 +#define mmCURSOR0_1_DMDATA_CNTL 0x0760 +#define mmCURSOR0_1_DMDATA_CNTL_BASE_IDX 2 +#define 
mmCURSOR0_1_DMDATA_QOS_CNTL 0x0761 +#define mmCURSOR0_1_DMDATA_QOS_CNTL_BASE_IDX 2 +#define mmCURSOR0_1_DMDATA_STATUS 0x0762 +#define mmCURSOR0_1_DMDATA_STATUS_BASE_IDX 2 +#define mmCURSOR0_1_DMDATA_SW_CNTL 0x0763 +#define mmCURSOR0_1_DMDATA_SW_CNTL_BASE_IDX 2 +#define mmCURSOR0_1_DMDATA_SW_DATA 0x0764 +#define mmCURSOR0_1_DMDATA_SW_DATA_BASE_IDX 2 + + + +// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec +// base address: 0x6e0 +#define mmHUBP2_DCSURF_SURFACE_CONFIG 0x079d +#define mmHUBP2_DCSURF_SURFACE_CONFIG_BASE_IDX 2 +#define mmHUBP2_DCSURF_ADDR_CONFIG 0x079e +#define mmHUBP2_DCSURF_ADDR_CONFIG_BASE_IDX 2 +#define mmHUBP2_DCSURF_TILING_CONFIG 0x079f +#define mmHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a2 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a3 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x07a4 +#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_START 0x07a5 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION 0x07a6 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C 0x07a7 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x07a8 +#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG 0x07a9 +#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2 +#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C 0x07aa +#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2 +#define mmHUBP2_DCHUBP_CNTL 0x07ab +#define mmHUBP2_DCHUBP_CNTL_BASE_IDX 2 +#define mmHUBP2_HUBP_CLK_CNTL 0x07ac +#define mmHUBP2_HUBP_CLK_CNTL_BASE_IDX 2 +#define mmHUBP2_HUBPREQ_DEBUG_DB 0x07ae +#define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP2_HUBPREQ_DEBUG 0x07af +#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2 +#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07b3 +#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 +#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07b4 +#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec +// base address: 0x6e0 +#define mmHUBPREQ2_DCSURF_SURFACE_PITCH 0x07bf +#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C 0x07c0 +#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS 0x07c2 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x07c3 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x07c4 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x07c5 +#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS 0x07c6 +#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x07c7 +#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x07c8 +#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define 
mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x07c9 +#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x07ca +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x07cb +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x07cc +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x07cd +#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x07ce +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x07cf +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x07d0 +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x07d1 +#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL 0x07d2 +#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_FLIP_CONTROL 0x07d3 +#define mmHUBPREQ2_DCSURF_FLIP_CONTROL_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2 0x07d4 +#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT 0x07d8 +#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE 0x07d9 +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH 0x07da +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C 0x07db +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C 0x07dc +#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE 0x07dd +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x07de +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C 0x07df +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x07e0 +#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ2_DCN_EXPANSION_MODE 0x07e4 +#define mmHUBPREQ2_DCN_EXPANSION_MODE_BASE_IDX 2 +#define mmHUBPREQ2_DCN_TTU_QOS_WM 0x07e5 +#define mmHUBPREQ2_DCN_TTU_QOS_WM_BASE_IDX 2 +#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL 0x07e6 +#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2 +#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0 0x07e7 +#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1 0x07e8 +#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0 0x07e9 +#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1 0x07ea +#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0 0x07eb +#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1 0x07ec +#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0 0x07ed +#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0_BASE_IDX 2 
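// Editor's note (not part of this patch): in these generated headers the
// "base address" given in each addressBlock comment is the per-instance
// byte offset from instance 0, while the mm* values are dword offsets.
// The two are consistent; for example, from the defines in this hunk:
//   mmHUBPREQ2_DCN_EXPANSION_MODE (0x07e4) - mmHUBPREQ0_DCN_EXPANSION_MODE (0x062c)
//     = 0x1b8 dwords = 0x6e0 bytes, matching "base address: 0x6e0" for the
//     dcbubp2 blocks, just as 0x0708 - 0x062c = 0xdc dwords = 0x370 bytes
//     matches the dcbubp1 blocks above.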
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1 0x07ee +#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ2_BLANK_OFFSET_0 0x07fe +#define mmHUBPREQ2_BLANK_OFFSET_0_BASE_IDX 2 +#define mmHUBPREQ2_BLANK_OFFSET_1 0x07ff +#define mmHUBPREQ2_BLANK_OFFSET_1_BASE_IDX 2 +#define mmHUBPREQ2_DST_DIMENSIONS 0x0800 +#define mmHUBPREQ2_DST_DIMENSIONS_BASE_IDX 2 +#define mmHUBPREQ2_DST_AFTER_SCALER 0x0801 +#define mmHUBPREQ2_DST_AFTER_SCALER_BASE_IDX 2 +#define mmHUBPREQ2_PREFETCH_SETTINGS 0x0802 +#define mmHUBPREQ2_PREFETCH_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ2_PREFETCH_SETTINGS_C 0x0803 +#define mmHUBPREQ2_PREFETCH_SETTINGS_C_BASE_IDX 2 +#define mmHUBPREQ2_VBLANK_PARAMETERS_0 0x0804 +#define mmHUBPREQ2_VBLANK_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ2_VBLANK_PARAMETERS_1 0x0805 +#define mmHUBPREQ2_VBLANK_PARAMETERS_1_BASE_IDX 2 +#define mmHUBPREQ2_VBLANK_PARAMETERS_2 0x0806 +#define mmHUBPREQ2_VBLANK_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ2_VBLANK_PARAMETERS_3 0x0807 +#define mmHUBPREQ2_VBLANK_PARAMETERS_3_BASE_IDX 2 +#define mmHUBPREQ2_VBLANK_PARAMETERS_4 0x0808 +#define mmHUBPREQ2_VBLANK_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ2_FLIP_PARAMETERS_0 0x0809 +#define mmHUBPREQ2_FLIP_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ2_FLIP_PARAMETERS_2 0x080b +#define mmHUBPREQ2_FLIP_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ2_NOM_PARAMETERS_4 0x0810 +#define mmHUBPREQ2_NOM_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ2_NOM_PARAMETERS_5 0x0811 +#define mmHUBPREQ2_NOM_PARAMETERS_5_BASE_IDX 2 +#define mmHUBPREQ2_NOM_PARAMETERS_6 0x0812 +#define mmHUBPREQ2_NOM_PARAMETERS_6_BASE_IDX 2 +#define mmHUBPREQ2_NOM_PARAMETERS_7 0x0813 +#define mmHUBPREQ2_NOM_PARAMETERS_7_BASE_IDX 2 +#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE 0x0814 +#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE_BASE_IDX 2 +#define mmHUBPREQ2_PER_LINE_DELIVERY 0x0815 +#define mmHUBPREQ2_PER_LINE_DELIVERY_BASE_IDX 2 +#define mmHUBPREQ2_CURSOR_SETTINGS 0x0816 +#define mmHUBPREQ2_CURSOR_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ 0x0817 +#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2 +#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT 0x0818 +#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2 +#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL 0x0819 +#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS 0x081a +#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec +// base address: 0x6e0 +#define mmHUBPRET2_HUBPRET_CONTROL 0x0822 +#define mmHUBPRET2_HUBPRET_CONTROL_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL 0x0823 +#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS 0x0824 +#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0 0x0825 +#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1 0x0826 +#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_READ_LINE0 0x0827 +#define mmHUBPRET2_HUBPRET_READ_LINE0_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_READ_LINE1 0x0828 +#define mmHUBPRET2_HUBPRET_READ_LINE1_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_INTERRUPT 0x0829 +#define mmHUBPRET2_HUBPRET_INTERRUPT_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE 0x082a +#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE_BASE_IDX 2 +#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS 0x082b +#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec +// base address: 
0x6e0 +#define mmCURSOR0_2_CURSOR_CONTROL 0x0830 +#define mmCURSOR0_2_CURSOR_CONTROL_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS 0x0831 +#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH 0x0832 +#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_SIZE 0x0833 +#define mmCURSOR0_2_CURSOR_SIZE_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_POSITION 0x0834 +#define mmCURSOR0_2_CURSOR_POSITION_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_HOT_SPOT 0x0835 +#define mmCURSOR0_2_CURSOR_HOT_SPOT_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_STEREO_CONTROL 0x0836 +#define mmCURSOR0_2_CURSOR_STEREO_CONTROL_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_DST_OFFSET 0x0837 +#define mmCURSOR0_2_CURSOR_DST_OFFSET_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL 0x0838 +#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL_BASE_IDX 2 +#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS 0x0839 +#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH 0x083a +#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_2_DMDATA_ADDRESS_LOW 0x083b +#define mmCURSOR0_2_DMDATA_ADDRESS_LOW_BASE_IDX 2 +#define mmCURSOR0_2_DMDATA_CNTL 0x083c +#define mmCURSOR0_2_DMDATA_CNTL_BASE_IDX 2 +#define mmCURSOR0_2_DMDATA_QOS_CNTL 0x083d +#define mmCURSOR0_2_DMDATA_QOS_CNTL_BASE_IDX 2 +#define mmCURSOR0_2_DMDATA_STATUS 0x083e +#define mmCURSOR0_2_DMDATA_STATUS_BASE_IDX 2 +#define mmCURSOR0_2_DMDATA_SW_CNTL 0x083f +#define mmCURSOR0_2_DMDATA_SW_CNTL_BASE_IDX 2 +#define mmCURSOR0_2_DMDATA_SW_DATA 0x0840 +#define mmCURSOR0_2_DMDATA_SW_DATA_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec +// base address: 0xa50 +#define mmHUBP3_DCSURF_SURFACE_CONFIG 0x0879 +#define mmHUBP3_DCSURF_SURFACE_CONFIG_BASE_IDX 2 +#define mmHUBP3_DCSURF_ADDR_CONFIG 0x087a +#define mmHUBP3_DCSURF_ADDR_CONFIG_BASE_IDX 2 +#define mmHUBP3_DCSURF_TILING_CONFIG 0x087b +#define mmHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2 +#define mmHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d +#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2 +#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087e +#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x087f +#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x0880 +#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_START 0x0881 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION 0x0882 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C 0x0883 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x0884 +#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2 +#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG 0x0885 +#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2 +#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C 0x0886 +#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2 +#define mmHUBP3_DCHUBP_CNTL 0x0887 +#define mmHUBP3_DCHUBP_CNTL_BASE_IDX 2 +#define mmHUBP3_HUBP_CLK_CNTL 0x0888 +#define mmHUBP3_HUBP_CLK_CNTL_BASE_IDX 2 +#define mmHUBP3_HUBPREQ_DEBUG_DB 0x088a +#define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2 +#define mmHUBP3_HUBPREQ_DEBUG 0x088b +#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2 +#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x088f +#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 +#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 
0x0890 +#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec +// base address: 0xa50 +#define mmHUBPREQ3_DCSURF_SURFACE_PITCH 0x089b +#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C 0x089c +#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS 0x089e +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x089f +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x08a0 +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x08a1 +#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS 0x08a2 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x08a3 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x08a4 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x08a5 +#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x08a6 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x08a7 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x08a8 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x08a9 +#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x08aa +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x08ab +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x08ac +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x08ad +#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL 0x08ae +#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_FLIP_CONTROL 0x08af +#define mmHUBPREQ3_DCSURF_FLIP_CONTROL_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2 0x08b0 +#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT 0x08b4 +#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE 0x08b5 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH 0x08b6 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C 0x08b7 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C 0x08b8 +#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE 0x08b9 +#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x08ba +#define 
mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C 0x08bb +#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2 +#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x08bc +#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2 +#define mmHUBPREQ3_DCN_EXPANSION_MODE 0x08c0 +#define mmHUBPREQ3_DCN_EXPANSION_MODE_BASE_IDX 2 +#define mmHUBPREQ3_DCN_TTU_QOS_WM 0x08c1 +#define mmHUBPREQ3_DCN_TTU_QOS_WM_BASE_IDX 2 +#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL 0x08c2 +#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2 +#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0 0x08c3 +#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1 0x08c4 +#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0 0x08c5 +#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1 0x08c6 +#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0 0x08c7 +#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0_BASE_IDX 2 +#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1 0x08c8 +#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1_BASE_IDX 2 +#define mmHUBPREQ3_BLANK_OFFSET_0 0x08da +#define mmHUBPREQ3_BLANK_OFFSET_0_BASE_IDX 2 +#define mmHUBPREQ3_BLANK_OFFSET_1 0x08db +#define mmHUBPREQ3_BLANK_OFFSET_1_BASE_IDX 2 +#define mmHUBPREQ3_DST_DIMENSIONS 0x08dc +#define mmHUBPREQ3_DST_DIMENSIONS_BASE_IDX 2 +#define mmHUBPREQ3_DST_AFTER_SCALER 0x08dd +#define mmHUBPREQ3_DST_AFTER_SCALER_BASE_IDX 2 +#define mmHUBPREQ3_PREFETCH_SETTINGS 0x08de +#define mmHUBPREQ3_PREFETCH_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ3_PREFETCH_SETTINGS_C 0x08df +#define mmHUBPREQ3_PREFETCH_SETTINGS_C_BASE_IDX 2 +#define mmHUBPREQ3_VBLANK_PARAMETERS_0 0x08e0 +#define mmHUBPREQ3_VBLANK_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ3_VBLANK_PARAMETERS_1 0x08e1 +#define mmHUBPREQ3_VBLANK_PARAMETERS_1_BASE_IDX 2 +#define mmHUBPREQ3_VBLANK_PARAMETERS_2 0x08e2 +#define mmHUBPREQ3_VBLANK_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ3_VBLANK_PARAMETERS_3 0x08e3 +#define mmHUBPREQ3_VBLANK_PARAMETERS_3_BASE_IDX 2 +#define mmHUBPREQ3_VBLANK_PARAMETERS_4 0x08e4 +#define mmHUBPREQ3_VBLANK_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ3_FLIP_PARAMETERS_0 0x08e5 +#define mmHUBPREQ3_FLIP_PARAMETERS_0_BASE_IDX 2 +#define mmHUBPREQ3_FLIP_PARAMETERS_2 0x08e7 +#define mmHUBPREQ3_FLIP_PARAMETERS_2_BASE_IDX 2 +#define mmHUBPREQ3_NOM_PARAMETERS_4 0x08ec +#define mmHUBPREQ3_NOM_PARAMETERS_4_BASE_IDX 2 +#define mmHUBPREQ3_NOM_PARAMETERS_5 0x08ed +#define mmHUBPREQ3_NOM_PARAMETERS_5_BASE_IDX 2 +#define mmHUBPREQ3_NOM_PARAMETERS_6 0x08ee +#define mmHUBPREQ3_NOM_PARAMETERS_6_BASE_IDX 2 +#define mmHUBPREQ3_NOM_PARAMETERS_7 0x08ef +#define mmHUBPREQ3_NOM_PARAMETERS_7_BASE_IDX 2 +#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE 0x08f0 +#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE_BASE_IDX 2 +#define mmHUBPREQ3_PER_LINE_DELIVERY 0x08f1 +#define mmHUBPREQ3_PER_LINE_DELIVERY_BASE_IDX 2 +#define mmHUBPREQ3_CURSOR_SETTINGS 0x08f2 +#define mmHUBPREQ3_CURSOR_SETTINGS_BASE_IDX 2 +#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ 0x08f3 +#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2 +#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT 0x08f4 +#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2 +#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL 0x08f5 +#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS 0x08f6 +#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec +// base address: 0xa50 
+#define mmHUBPRET3_HUBPRET_CONTROL 0x08fe +#define mmHUBPRET3_HUBPRET_CONTROL_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL 0x08ff +#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS 0x0900 +#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0 0x0901 +#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1 0x0902 +#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_READ_LINE0 0x0903 +#define mmHUBPRET3_HUBPRET_READ_LINE0_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_READ_LINE1 0x0904 +#define mmHUBPRET3_HUBPRET_READ_LINE1_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_INTERRUPT 0x0905 +#define mmHUBPRET3_HUBPRET_INTERRUPT_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE 0x0906 +#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE_BASE_IDX 2 +#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS 0x0907 +#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec +// base address: 0xa50 +#define mmCURSOR0_3_CURSOR_CONTROL 0x090c +#define mmCURSOR0_3_CURSOR_CONTROL_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS 0x090d +#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH 0x090e +#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_SIZE 0x090f +#define mmCURSOR0_3_CURSOR_SIZE_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_POSITION 0x0910 +#define mmCURSOR0_3_CURSOR_POSITION_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_HOT_SPOT 0x0911 +#define mmCURSOR0_3_CURSOR_HOT_SPOT_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_STEREO_CONTROL 0x0912 +#define mmCURSOR0_3_CURSOR_STEREO_CONTROL_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_DST_OFFSET 0x0913 +#define mmCURSOR0_3_CURSOR_DST_OFFSET_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL 0x0914 +#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL_BASE_IDX 2 +#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS 0x0915 +#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH 0x0916 +#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH_BASE_IDX 2 +#define mmCURSOR0_3_DMDATA_ADDRESS_LOW 0x0917 +#define mmCURSOR0_3_DMDATA_ADDRESS_LOW_BASE_IDX 2 +#define mmCURSOR0_3_DMDATA_CNTL 0x0918 +#define mmCURSOR0_3_DMDATA_CNTL_BASE_IDX 2 +#define mmCURSOR0_3_DMDATA_QOS_CNTL 0x0919 +#define mmCURSOR0_3_DMDATA_QOS_CNTL_BASE_IDX 2 +#define mmCURSOR0_3_DMDATA_STATUS 0x091a +#define mmCURSOR0_3_DMDATA_STATUS_BASE_IDX 2 +#define mmCURSOR0_3_DMDATA_SW_CNTL 0x091b +#define mmCURSOR0_3_DMDATA_SW_CNTL_BASE_IDX 2 +#define mmCURSOR0_3_DMDATA_SW_DATA 0x091c +#define mmCURSOR0_3_DMDATA_SW_DATA_BASE_IDX 2 + +// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec +// base address: 0x0 +#define mmDPP_TOP0_DPP_CONTROL 0x0cc5 +#define mmDPP_TOP0_DPP_CONTROL_BASE_IDX 2 +#define mmDPP_TOP0_DPP_SOFT_RESET 0x0cc6 +#define mmDPP_TOP0_DPP_SOFT_RESET_BASE_IDX 2 +#define mmDPP_TOP0_DPP_CRC_VAL_R_G 0x0cc7 +#define mmDPP_TOP0_DPP_CRC_VAL_R_G_BASE_IDX 2 +#define mmDPP_TOP0_DPP_CRC_VAL_B_A 0x0cc8 +#define mmDPP_TOP0_DPP_CRC_VAL_B_A_BASE_IDX 2 +#define mmDPP_TOP0_DPP_CRC_CTRL 0x0cc9 +#define mmDPP_TOP0_DPP_CRC_CTRL_BASE_IDX 2 +#define mmDPP_TOP0_HOST_READ_CONTROL 0x0cca +#define mmDPP_TOP0_HOST_READ_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec +// base address: 0x0 +#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT 0x0ccf +#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2 +#define mmCNVC_CFG0_FORMAT_CONTROL 
0x0cd0 +#define mmCNVC_CFG0_FORMAT_CONTROL_BASE_IDX 2 +#define mmCNVC_CFG0_FCNV_FP_BIAS_R 0x0cd1 +#define mmCNVC_CFG0_FCNV_FP_BIAS_R_BASE_IDX 2 +#define mmCNVC_CFG0_FCNV_FP_BIAS_G 0x0cd2 +#define mmCNVC_CFG0_FCNV_FP_BIAS_G_BASE_IDX 2 +#define mmCNVC_CFG0_FCNV_FP_BIAS_B 0x0cd3 +#define mmCNVC_CFG0_FCNV_FP_BIAS_B_BASE_IDX 2 +#define mmCNVC_CFG0_FCNV_FP_SCALE_R 0x0cd4 +#define mmCNVC_CFG0_FCNV_FP_SCALE_R_BASE_IDX 2 +#define mmCNVC_CFG0_FCNV_FP_SCALE_G 0x0cd5 +#define mmCNVC_CFG0_FCNV_FP_SCALE_G_BASE_IDX 2 +#define mmCNVC_CFG0_FCNV_FP_SCALE_B 0x0cd6 +#define mmCNVC_CFG0_FCNV_FP_SCALE_B_BASE_IDX 2 +#define mmCNVC_CFG0_COLOR_KEYER_CONTROL 0x0cd7 +#define mmCNVC_CFG0_COLOR_KEYER_CONTROL_BASE_IDX 2 +#define mmCNVC_CFG0_COLOR_KEYER_ALPHA 0x0cd8 +#define mmCNVC_CFG0_COLOR_KEYER_ALPHA_BASE_IDX 2 +#define mmCNVC_CFG0_COLOR_KEYER_RED 0x0cd9 +#define mmCNVC_CFG0_COLOR_KEYER_RED_BASE_IDX 2 +#define mmCNVC_CFG0_COLOR_KEYER_GREEN 0x0cda +#define mmCNVC_CFG0_COLOR_KEYER_GREEN_BASE_IDX 2 +#define mmCNVC_CFG0_COLOR_KEYER_BLUE 0x0cdb +#define mmCNVC_CFG0_COLOR_KEYER_BLUE_BASE_IDX 2 +#define mmCNVC_CFG0_ALPHA_2BIT_LUT 0x0cdd +#define mmCNVC_CFG0_ALPHA_2BIT_LUT_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec +// base address: 0x0 +#define mmCNVC_CUR0_CURSOR0_CONTROL 0x0ce0 +#define mmCNVC_CUR0_CURSOR0_CONTROL_BASE_IDX 2 +#define mmCNVC_CUR0_CURSOR0_COLOR0 0x0ce1 +#define mmCNVC_CUR0_CURSOR0_COLOR0_BASE_IDX 2 +#define mmCNVC_CUR0_CURSOR0_COLOR1 0x0ce2 +#define mmCNVC_CUR0_CURSOR0_COLOR1_BASE_IDX 2 +#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS 0x0ce3 +#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec +// base address: 0x0 +#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT 0x0cea +#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2 +#define mmDSCL0_SCL_COEF_RAM_TAP_DATA 0x0ceb +#define mmDSCL0_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2 +#define mmDSCL0_SCL_MODE 0x0cec +#define mmDSCL0_SCL_MODE_BASE_IDX 2 +#define mmDSCL0_SCL_TAP_CONTROL 0x0ced +#define mmDSCL0_SCL_TAP_CONTROL_BASE_IDX 2 +#define mmDSCL0_DSCL_CONTROL 0x0cee +#define mmDSCL0_DSCL_CONTROL_BASE_IDX 2 +#define mmDSCL0_DSCL_2TAP_CONTROL 0x0cef +#define mmDSCL0_DSCL_2TAP_CONTROL_BASE_IDX 2 +#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x0cf0 +#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2 +#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x0cf1 +#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2 +#define mmDSCL0_SCL_HORZ_FILTER_INIT 0x0cf2 +#define mmDSCL0_SCL_HORZ_FILTER_INIT_BASE_IDX 2 +#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0cf3 +#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2 +#define mmDSCL0_SCL_HORZ_FILTER_INIT_C 0x0cf4 +#define mmDSCL0_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2 +#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x0cf5 +#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2 +#define mmDSCL0_SCL_VERT_FILTER_INIT 0x0cf6 +#define mmDSCL0_SCL_VERT_FILTER_INIT_BASE_IDX 2 +#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT 0x0cf7 +#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2 +#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C 0x0cf8 +#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2 +#define mmDSCL0_SCL_VERT_FILTER_INIT_C 0x0cf9 +#define mmDSCL0_SCL_VERT_FILTER_INIT_C_BASE_IDX 2 +#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C 0x0cfa +#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2 +#define mmDSCL0_SCL_BLACK_OFFSET 0x0cfb +#define mmDSCL0_SCL_BLACK_OFFSET_BASE_IDX 2 +#define mmDSCL0_DSCL_UPDATE 0x0cfc +#define mmDSCL0_DSCL_UPDATE_BASE_IDX 2 +#define mmDSCL0_DSCL_AUTOCAL 
0x0cfd +#define mmDSCL0_DSCL_AUTOCAL_BASE_IDX 2 +#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0cfe +#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2 +#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0cff +#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2 +#define mmDSCL0_OTG_H_BLANK 0x0d00 +#define mmDSCL0_OTG_H_BLANK_BASE_IDX 2 +#define mmDSCL0_OTG_V_BLANK 0x0d01 +#define mmDSCL0_OTG_V_BLANK_BASE_IDX 2 +#define mmDSCL0_RECOUT_START 0x0d02 +#define mmDSCL0_RECOUT_START_BASE_IDX 2 +#define mmDSCL0_RECOUT_SIZE 0x0d03 +#define mmDSCL0_RECOUT_SIZE_BASE_IDX 2 +#define mmDSCL0_MPC_SIZE 0x0d04 +#define mmDSCL0_MPC_SIZE_BASE_IDX 2 +#define mmDSCL0_LB_DATA_FORMAT 0x0d05 +#define mmDSCL0_LB_DATA_FORMAT_BASE_IDX 2 +#define mmDSCL0_LB_MEMORY_CTRL 0x0d06 +#define mmDSCL0_LB_MEMORY_CTRL_BASE_IDX 2 +#define mmDSCL0_LB_V_COUNTER 0x0d07 +#define mmDSCL0_LB_V_COUNTER_BASE_IDX 2 +#define mmDSCL0_DSCL_MEM_PWR_CTRL 0x0d08 +#define mmDSCL0_DSCL_MEM_PWR_CTRL_BASE_IDX 2 +#define mmDSCL0_DSCL_MEM_PWR_STATUS 0x0d09 +#define mmDSCL0_DSCL_MEM_PWR_STATUS_BASE_IDX 2 +#define mmDSCL0_OBUF_CONTROL 0x0d0a +#define mmDSCL0_OBUF_CONTROL_BASE_IDX 2 +#define mmDSCL0_OBUF_MEM_PWR_CTRL 0x0d0b +#define mmDSCL0_OBUF_MEM_PWR_CTRL_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec +// base address: 0x0 +#define mmCM0_CM_CONTROL 0x0d1a +#define mmCM0_CM_CONTROL_BASE_IDX 2 +#define mmCM0_CM_ICSC_CONTROL 0x0d1b +#define mmCM0_CM_ICSC_CONTROL_BASE_IDX 2 +#define mmCM0_CM_ICSC_C11_C12 0x0d1c +#define mmCM0_CM_ICSC_C11_C12_BASE_IDX 2 +#define mmCM0_CM_ICSC_C13_C14 0x0d1d +#define mmCM0_CM_ICSC_C13_C14_BASE_IDX 2 +#define mmCM0_CM_ICSC_C21_C22 0x0d1e +#define mmCM0_CM_ICSC_C21_C22_BASE_IDX 2 +#define mmCM0_CM_ICSC_C23_C24 0x0d1f +#define mmCM0_CM_ICSC_C23_C24_BASE_IDX 2 +#define mmCM0_CM_ICSC_C31_C32 0x0d20 +#define mmCM0_CM_ICSC_C31_C32_BASE_IDX 2 +#define mmCM0_CM_ICSC_C33_C34 0x0d21 +#define mmCM0_CM_ICSC_C33_C34_BASE_IDX 2 +#define mmCM0_CM_ICSC_B_C11_C12 0x0d22 +#define mmCM0_CM_ICSC_B_C11_C12_BASE_IDX 2 +#define mmCM0_CM_ICSC_B_C13_C14 0x0d23 +#define mmCM0_CM_ICSC_B_C13_C14_BASE_IDX 2 +#define mmCM0_CM_ICSC_B_C21_C22 0x0d24 +#define mmCM0_CM_ICSC_B_C21_C22_BASE_IDX 2 +#define mmCM0_CM_ICSC_B_C23_C24 0x0d25 +#define mmCM0_CM_ICSC_B_C23_C24_BASE_IDX 2 +#define mmCM0_CM_ICSC_B_C31_C32 0x0d26 +#define mmCM0_CM_ICSC_B_C31_C32_BASE_IDX 2 +#define mmCM0_CM_ICSC_B_C33_C34 0x0d27 +#define mmCM0_CM_ICSC_B_C33_C34_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_CONTROL 0x0d28 +#define mmCM0_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_C11_C12 0x0d29 +#define mmCM0_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_C13_C14 0x0d2a +#define mmCM0_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_C21_C22 0x0d2b +#define mmCM0_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_C23_C24 0x0d2c +#define mmCM0_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_C31_C32 0x0d2d +#define mmCM0_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_C33_C34 0x0d2e +#define mmCM0_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_B_C11_C12 0x0d2f +#define mmCM0_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_B_C13_C14 0x0d30 +#define mmCM0_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_B_C21_C22 0x0d31 +#define mmCM0_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_B_C23_C24 0x0d32 +#define mmCM0_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_B_C31_C32 0x0d33 +#define 
mmCM0_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2 +#define mmCM0_CM_GAMUT_REMAP_B_C33_C34 0x0d34 +#define mmCM0_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2 +#define mmCM0_CM_BIAS_CR_R 0x0d35 +#define mmCM0_CM_BIAS_CR_R_BASE_IDX 2 +#define mmCM0_CM_BIAS_Y_G_CB_B 0x0d36 +#define mmCM0_CM_BIAS_Y_G_CB_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_CONTROL 0x0d37 +#define mmCM0_CM_DGAM_CONTROL_BASE_IDX 2 +#define mmCM0_CM_DGAM_LUT_INDEX 0x0d38 +#define mmCM0_CM_DGAM_LUT_INDEX_BASE_IDX 2 +#define mmCM0_CM_DGAM_LUT_DATA 0x0d39 +#define mmCM0_CM_DGAM_LUT_DATA_BASE_IDX 2 +#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK 0x0d3a +#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_START_CNTL_B 0x0d3b +#define mmCM0_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_START_CNTL_G 0x0d3c +#define mmCM0_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_START_CNTL_R 0x0d3d +#define mmCM0_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0d3e +#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0d3f +#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0d40 +#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B 0x0d41 +#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B 0x0d42 +#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G 0x0d43 +#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G 0x0d44 +#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R 0x0d45 +#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R 0x0d46 +#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_0_1 0x0d47 +#define mmCM0_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_2_3 0x0d48 +#define mmCM0_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_4_5 0x0d49 +#define mmCM0_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_6_7 0x0d4a +#define mmCM0_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_8_9 0x0d4b +#define mmCM0_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_10_11 0x0d4c +#define mmCM0_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_12_13 0x0d4d +#define mmCM0_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMA_REGION_14_15 0x0d4e +#define mmCM0_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_START_CNTL_B 0x0d4f +#define mmCM0_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_START_CNTL_G 0x0d50 +#define mmCM0_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_START_CNTL_R 0x0d51 +#define mmCM0_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0d52 +#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0d53 +#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0d54 +#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B 0x0d55 +#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B 0x0d56 +#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G 0x0d57 +#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G 0x0d58 
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R 0x0d59 +#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R 0x0d5a +#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_0_1 0x0d5b +#define mmCM0_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_2_3 0x0d5c +#define mmCM0_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_4_5 0x0d5d +#define mmCM0_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_6_7 0x0d5e +#define mmCM0_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_8_9 0x0d5f +#define mmCM0_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_10_11 0x0d60 +#define mmCM0_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_12_13 0x0d61 +#define mmCM0_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM0_CM_DGAM_RAMB_REGION_14_15 0x0d62 +#define mmCM0_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_CONTROL 0x0d63 +#define mmCM0_CM_BLNDGAM_CONTROL_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_LUT_INDEX 0x0d64 +#define mmCM0_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_LUT_DATA 0x0d65 +#define mmCM0_CM_BLNDGAM_LUT_DATA_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0d66 +#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B 0x0d67 +#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G 0x0d68 +#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R 0x0d69 +#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0d6a +#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0d6b +#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0d6c +#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0d6d +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0d6e +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0d6f +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0d70 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0d71 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0d72 +#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1 0x0d73 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3 0x0d74 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5 0x0d75 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7 0x0d76 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9 0x0d77 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11 0x0d78 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13 0x0d79 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15 0x0d7a +#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_16_17 0x0d7b +#define 
mmCM0_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19 0x0d7c +#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21 0x0d7d +#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23 0x0d7e +#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25 0x0d7f +#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27 0x0d80 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29 0x0d81 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31 0x0d82 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33 0x0d83 +#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B 0x0d84 +#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G 0x0d85 +#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R 0x0d86 +#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0d87 +#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0d88 +#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0d89 +#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0d8a +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0d8b +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0d8c +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0d8d +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0d8e +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0d8f +#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1 0x0d90 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3 0x0d91 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5 0x0d92 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7 0x0d93 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9 0x0d94 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11 0x0d95 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13 0x0d96 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15 0x0d97 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17 0x0d98 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19 0x0d99 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21 0x0d9a +#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23 0x0d9b +#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25 0x0d9c +#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2 +#define 
mmCM0_CM_BLNDGAM_RAMB_REGION_26_27 0x0d9d +#define mmCM0_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29 0x0d9e +#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31 0x0d9f +#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33 0x0da0 +#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2 +#define mmCM0_CM_HDR_MULT_COEF 0x0da1 +#define mmCM0_CM_HDR_MULT_COEF_BASE_IDX 2 +#define mmCM0_CM_MEM_PWR_CTRL 0x0da2 +#define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2 +#define mmCM0_CM_MEM_PWR_STATUS 0x0da3 +#define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCM0_CM_DEALPHA 0x0da5 +#define mmCM0_CM_DEALPHA_BASE_IDX 2 +#define mmCM0_CM_COEF_FORMAT 0x0da6 +#define mmCM0_CM_COEF_FORMAT_BASE_IDX 2 +#define mmCM0_CM_SHAPER_CONTROL 0x0da7 +#define mmCM0_CM_SHAPER_CONTROL_BASE_IDX 2 +#define mmCM0_CM_SHAPER_OFFSET_R 0x0da8 +#define mmCM0_CM_SHAPER_OFFSET_R_BASE_IDX 2 +#define mmCM0_CM_SHAPER_OFFSET_G 0x0da9 +#define mmCM0_CM_SHAPER_OFFSET_G_BASE_IDX 2 +#define mmCM0_CM_SHAPER_OFFSET_B 0x0daa +#define mmCM0_CM_SHAPER_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_SHAPER_SCALE_R 0x0dab +#define mmCM0_CM_SHAPER_SCALE_R_BASE_IDX 2 +#define mmCM0_CM_SHAPER_SCALE_G_B 0x0dac +#define mmCM0_CM_SHAPER_SCALE_G_B_BASE_IDX 2 +#define mmCM0_CM_SHAPER_LUT_INDEX 0x0dad +#define mmCM0_CM_SHAPER_LUT_INDEX_BASE_IDX 2 +#define mmCM0_CM_SHAPER_LUT_DATA 0x0dae +#define mmCM0_CM_SHAPER_LUT_DATA_BASE_IDX 2 +#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK 0x0daf +#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B 0x0db0 +#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G 0x0db1 +#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R 0x0db2 +#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B 0x0db3 +#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G 0x0db4 +#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R 0x0db5 +#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_0_1 0x0db6 +#define mmCM0_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_2_3 0x0db7 +#define mmCM0_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_4_5 0x0db8 +#define mmCM0_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_6_7 0x0db9 +#define mmCM0_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_8_9 0x0dba +#define mmCM0_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_10_11 0x0dbb +#define mmCM0_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_12_13 0x0dbc +#define mmCM0_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_14_15 0x0dbd +#define mmCM0_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_16_17 0x0dbe +#define mmCM0_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_18_19 0x0dbf +#define mmCM0_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_20_21 0x0dc0 +#define mmCM0_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_22_23 0x0dc1 +#define mmCM0_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_24_25 0x0dc2 +#define mmCM0_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2 +#define 
mmCM0_CM_SHAPER_RAMA_REGION_26_27 0x0dc3 +#define mmCM0_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_28_29 0x0dc4 +#define mmCM0_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_30_31 0x0dc5 +#define mmCM0_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMA_REGION_32_33 0x0dc6 +#define mmCM0_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B 0x0dc7 +#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G 0x0dc8 +#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R 0x0dc9 +#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B 0x0dca +#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G 0x0dcb +#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R 0x0dcc +#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_0_1 0x0dcd +#define mmCM0_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_2_3 0x0dce +#define mmCM0_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_4_5 0x0dcf +#define mmCM0_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_6_7 0x0dd0 +#define mmCM0_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_8_9 0x0dd1 +#define mmCM0_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_10_11 0x0dd2 +#define mmCM0_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_12_13 0x0dd3 +#define mmCM0_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_14_15 0x0dd4 +#define mmCM0_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_16_17 0x0dd5 +#define mmCM0_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_18_19 0x0dd6 +#define mmCM0_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_20_21 0x0dd7 +#define mmCM0_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_22_23 0x0dd8 +#define mmCM0_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_24_25 0x0dd9 +#define mmCM0_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_26_27 0x0dda +#define mmCM0_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_28_29 0x0ddb +#define mmCM0_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_30_31 0x0ddc +#define mmCM0_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2 +#define mmCM0_CM_SHAPER_RAMB_REGION_32_33 0x0ddd +#define mmCM0_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2 +#define mmCM0_CM_MEM_PWR_CTRL2 0x0dde +#define mmCM0_CM_MEM_PWR_CTRL2_BASE_IDX 2 +#define mmCM0_CM_MEM_PWR_STATUS2 0x0ddf +#define mmCM0_CM_MEM_PWR_STATUS2_BASE_IDX 2 +#define mmCM0_CM_3DLUT_MODE 0x0de0 +#define mmCM0_CM_3DLUT_MODE_BASE_IDX 2 +#define mmCM0_CM_3DLUT_INDEX 0x0de1 +#define mmCM0_CM_3DLUT_INDEX_BASE_IDX 2 +#define mmCM0_CM_3DLUT_DATA 0x0de2 +#define mmCM0_CM_3DLUT_DATA_BASE_IDX 2 +#define mmCM0_CM_3DLUT_DATA_30BIT 0x0de3 +#define mmCM0_CM_3DLUT_DATA_30BIT_BASE_IDX 2 +#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL 0x0de4 +#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2 +#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR 0x0de5 +#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2 +#define mmCM0_CM_3DLUT_OUT_OFFSET_R 0x0de6 +#define mmCM0_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2 
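// Editor's note -- a minimal usage sketch, not part of this patch. Each mm*
// offset is dword-relative to a per-IP register segment, and the companion
// *_BASE_IDX define selects that segment in the driver's base-address table.
// amdgpu resolves the pair with SOC15_REG_OFFSET() from soc15_common.h,
// roughly as below (the surrounding variable names are assumptions, not
// taken from this diff):
//
//   #define SOC15_REG_OFFSET(ip, inst, reg) \
//           (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
//
//   /* e.g. stream CM0's 3D LUT through its INDEX/DATA register pair;
//    * lut_entries and lut[] are hypothetical: */
//   WREG32(SOC15_REG_OFFSET(DCE, 0, mmCM0_CM_3DLUT_INDEX), 0);
//   for (i = 0; i < lut_entries; i++)
//           WREG32(SOC15_REG_OFFSET(DCE, 0, mmCM0_CM_3DLUT_DATA), lut[i]);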
+#define mmCM0_CM_3DLUT_OUT_OFFSET_G 0x0de7 +#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 +#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0de8 +#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0de9 +#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0dea +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 + +// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec +// base address: 0x5ac +#define mmDPP_TOP1_DPP_CONTROL 0x0e30 +#define mmDPP_TOP1_DPP_CONTROL_BASE_IDX 2 +#define mmDPP_TOP1_DPP_SOFT_RESET 0x0e31 +#define mmDPP_TOP1_DPP_SOFT_RESET_BASE_IDX 2 +#define mmDPP_TOP1_DPP_CRC_VAL_R_G 0x0e32 +#define mmDPP_TOP1_DPP_CRC_VAL_R_G_BASE_IDX 2 +#define mmDPP_TOP1_DPP_CRC_VAL_B_A 0x0e33 +#define mmDPP_TOP1_DPP_CRC_VAL_B_A_BASE_IDX 2 +#define mmDPP_TOP1_DPP_CRC_CTRL 0x0e34 +#define mmDPP_TOP1_DPP_CRC_CTRL_BASE_IDX 2 +#define mmDPP_TOP1_HOST_READ_CONTROL 0x0e35 +#define mmDPP_TOP1_HOST_READ_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec +// base address: 0x5ac +#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT 0x0e3a +#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2 +#define mmCNVC_CFG1_FORMAT_CONTROL 0x0e3b +#define mmCNVC_CFG1_FORMAT_CONTROL_BASE_IDX 2 +#define mmCNVC_CFG1_FCNV_FP_BIAS_R 0x0e3c +#define mmCNVC_CFG1_FCNV_FP_BIAS_R_BASE_IDX 2 +#define mmCNVC_CFG1_FCNV_FP_BIAS_G 0x0e3d +#define mmCNVC_CFG1_FCNV_FP_BIAS_G_BASE_IDX 2 +#define mmCNVC_CFG1_FCNV_FP_BIAS_B 0x0e3e +#define mmCNVC_CFG1_FCNV_FP_BIAS_B_BASE_IDX 2 +#define mmCNVC_CFG1_FCNV_FP_SCALE_R 0x0e3f +#define mmCNVC_CFG1_FCNV_FP_SCALE_R_BASE_IDX 2 +#define mmCNVC_CFG1_FCNV_FP_SCALE_G 0x0e40 +#define mmCNVC_CFG1_FCNV_FP_SCALE_G_BASE_IDX 2 +#define mmCNVC_CFG1_FCNV_FP_SCALE_B 0x0e41 +#define mmCNVC_CFG1_FCNV_FP_SCALE_B_BASE_IDX 2 +#define mmCNVC_CFG1_COLOR_KEYER_CONTROL 0x0e42 +#define mmCNVC_CFG1_COLOR_KEYER_CONTROL_BASE_IDX 2 +#define mmCNVC_CFG1_COLOR_KEYER_ALPHA 0x0e43 +#define mmCNVC_CFG1_COLOR_KEYER_ALPHA_BASE_IDX 2 +#define mmCNVC_CFG1_COLOR_KEYER_RED 0x0e44 +#define mmCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX 2 +#define mmCNVC_CFG1_COLOR_KEYER_GREEN 0x0e45 +#define mmCNVC_CFG1_COLOR_KEYER_GREEN_BASE_IDX 2 +#define mmCNVC_CFG1_COLOR_KEYER_BLUE 0x0e46 +#define mmCNVC_CFG1_COLOR_KEYER_BLUE_BASE_IDX 2 +#define mmCNVC_CFG1_ALPHA_2BIT_LUT 0x0e48 +#define mmCNVC_CFG1_ALPHA_2BIT_LUT_BASE_IDX 2 + +// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec +// base address: 0x5ac +#define mmCNVC_CUR1_CURSOR0_CONTROL 0x0e4b +#define mmCNVC_CUR1_CURSOR0_CONTROL_BASE_IDX 2 +#define mmCNVC_CUR1_CURSOR0_COLOR0 0x0e4c +#define mmCNVC_CUR1_CURSOR0_COLOR0_BASE_IDX 2 +#define mmCNVC_CUR1_CURSOR0_COLOR1 0x0e4d +#define mmCNVC_CUR1_CURSOR0_COLOR1_BASE_IDX 2 +#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS 0x0e4e +#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec +// base address: 0x5ac +#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT 0x0e55 +#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2 +#define mmDSCL1_SCL_COEF_RAM_TAP_DATA 0x0e56 +#define mmDSCL1_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2 +#define mmDSCL1_SCL_MODE 0x0e57 +#define mmDSCL1_SCL_MODE_BASE_IDX 2 +#define mmDSCL1_SCL_TAP_CONTROL 0x0e58 +#define mmDSCL1_SCL_TAP_CONTROL_BASE_IDX 2 +#define mmDSCL1_DSCL_CONTROL 0x0e59 +#define mmDSCL1_DSCL_CONTROL_BASE_IDX 2 +#define mmDSCL1_DSCL_2TAP_CONTROL 0x0e5a +#define mmDSCL1_DSCL_2TAP_CONTROL_BASE_IDX 2 +#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x0e5b +#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2 +#define 
mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x0e5c +#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2 +#define mmDSCL1_SCL_HORZ_FILTER_INIT 0x0e5d +#define mmDSCL1_SCL_HORZ_FILTER_INIT_BASE_IDX 2 +#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0e5e +#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2 +#define mmDSCL1_SCL_HORZ_FILTER_INIT_C 0x0e5f +#define mmDSCL1_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2 +#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x0e60 +#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2 +#define mmDSCL1_SCL_VERT_FILTER_INIT 0x0e61 +#define mmDSCL1_SCL_VERT_FILTER_INIT_BASE_IDX 2 +#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT 0x0e62 +#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2 +#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C 0x0e63 +#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2 +#define mmDSCL1_SCL_VERT_FILTER_INIT_C 0x0e64 +#define mmDSCL1_SCL_VERT_FILTER_INIT_C_BASE_IDX 2 +#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C 0x0e65 +#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2 +#define mmDSCL1_SCL_BLACK_OFFSET 0x0e66 +#define mmDSCL1_SCL_BLACK_OFFSET_BASE_IDX 2 +#define mmDSCL1_DSCL_UPDATE 0x0e67 +#define mmDSCL1_DSCL_UPDATE_BASE_IDX 2 +#define mmDSCL1_DSCL_AUTOCAL 0x0e68 +#define mmDSCL1_DSCL_AUTOCAL_BASE_IDX 2 +#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0e69 +#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2 +#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0e6a +#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2 +#define mmDSCL1_OTG_H_BLANK 0x0e6b +#define mmDSCL1_OTG_H_BLANK_BASE_IDX 2 +#define mmDSCL1_OTG_V_BLANK 0x0e6c +#define mmDSCL1_OTG_V_BLANK_BASE_IDX 2 +#define mmDSCL1_RECOUT_START 0x0e6d +#define mmDSCL1_RECOUT_START_BASE_IDX 2 +#define mmDSCL1_RECOUT_SIZE 0x0e6e +#define mmDSCL1_RECOUT_SIZE_BASE_IDX 2 +#define mmDSCL1_MPC_SIZE 0x0e6f +#define mmDSCL1_MPC_SIZE_BASE_IDX 2 +#define mmDSCL1_LB_DATA_FORMAT 0x0e70 +#define mmDSCL1_LB_DATA_FORMAT_BASE_IDX 2 +#define mmDSCL1_LB_MEMORY_CTRL 0x0e71 +#define mmDSCL1_LB_MEMORY_CTRL_BASE_IDX 2 +#define mmDSCL1_LB_V_COUNTER 0x0e72 +#define mmDSCL1_LB_V_COUNTER_BASE_IDX 2 +#define mmDSCL1_DSCL_MEM_PWR_CTRL 0x0e73 +#define mmDSCL1_DSCL_MEM_PWR_CTRL_BASE_IDX 2 +#define mmDSCL1_DSCL_MEM_PWR_STATUS 0x0e74 +#define mmDSCL1_DSCL_MEM_PWR_STATUS_BASE_IDX 2 +#define mmDSCL1_OBUF_CONTROL 0x0e75 +#define mmDSCL1_OBUF_CONTROL_BASE_IDX 2 +#define mmDSCL1_OBUF_MEM_PWR_CTRL 0x0e76 +#define mmDSCL1_OBUF_MEM_PWR_CTRL_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec +// base address: 0x5ac +#define mmCM1_CM_CONTROL 0x0e85 +#define mmCM1_CM_CONTROL_BASE_IDX 2 +#define mmCM1_CM_ICSC_CONTROL 0x0e86 +#define mmCM1_CM_ICSC_CONTROL_BASE_IDX 2 +#define mmCM1_CM_ICSC_C11_C12 0x0e87 +#define mmCM1_CM_ICSC_C11_C12_BASE_IDX 2 +#define mmCM1_CM_ICSC_C13_C14 0x0e88 +#define mmCM1_CM_ICSC_C13_C14_BASE_IDX 2 +#define mmCM1_CM_ICSC_C21_C22 0x0e89 +#define mmCM1_CM_ICSC_C21_C22_BASE_IDX 2 +#define mmCM1_CM_ICSC_C23_C24 0x0e8a +#define mmCM1_CM_ICSC_C23_C24_BASE_IDX 2 +#define mmCM1_CM_ICSC_C31_C32 0x0e8b +#define mmCM1_CM_ICSC_C31_C32_BASE_IDX 2 +#define mmCM1_CM_ICSC_C33_C34 0x0e8c +#define mmCM1_CM_ICSC_C33_C34_BASE_IDX 2 +#define mmCM1_CM_ICSC_B_C11_C12 0x0e8d +#define mmCM1_CM_ICSC_B_C11_C12_BASE_IDX 2 +#define mmCM1_CM_ICSC_B_C13_C14 0x0e8e +#define mmCM1_CM_ICSC_B_C13_C14_BASE_IDX 2 +#define mmCM1_CM_ICSC_B_C21_C22 0x0e8f +#define mmCM1_CM_ICSC_B_C21_C22_BASE_IDX 2 +#define mmCM1_CM_ICSC_B_C23_C24 0x0e90 +#define mmCM1_CM_ICSC_B_C23_C24_BASE_IDX 2 +#define mmCM1_CM_ICSC_B_C31_C32 0x0e91 +#define 
mmCM1_CM_ICSC_B_C31_C32_BASE_IDX 2 +#define mmCM1_CM_ICSC_B_C33_C34 0x0e92 +#define mmCM1_CM_ICSC_B_C33_C34_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_CONTROL 0x0e93 +#define mmCM1_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_C11_C12 0x0e94 +#define mmCM1_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_C13_C14 0x0e95 +#define mmCM1_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_C21_C22 0x0e96 +#define mmCM1_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_C23_C24 0x0e97 +#define mmCM1_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_C31_C32 0x0e98 +#define mmCM1_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_C33_C34 0x0e99 +#define mmCM1_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_B_C11_C12 0x0e9a +#define mmCM1_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_B_C13_C14 0x0e9b +#define mmCM1_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_B_C21_C22 0x0e9c +#define mmCM1_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_B_C23_C24 0x0e9d +#define mmCM1_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_B_C31_C32 0x0e9e +#define mmCM1_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2 +#define mmCM1_CM_GAMUT_REMAP_B_C33_C34 0x0e9f +#define mmCM1_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2 +#define mmCM1_CM_BIAS_CR_R 0x0ea0 +#define mmCM1_CM_BIAS_CR_R_BASE_IDX 2 +#define mmCM1_CM_BIAS_Y_G_CB_B 0x0ea1 +#define mmCM1_CM_BIAS_Y_G_CB_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_CONTROL 0x0ea2 +#define mmCM1_CM_DGAM_CONTROL_BASE_IDX 2 +#define mmCM1_CM_DGAM_LUT_INDEX 0x0ea3 +#define mmCM1_CM_DGAM_LUT_INDEX_BASE_IDX 2 +#define mmCM1_CM_DGAM_LUT_DATA 0x0ea4 +#define mmCM1_CM_DGAM_LUT_DATA_BASE_IDX 2 +#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK 0x0ea5 +#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_START_CNTL_B 0x0ea6 +#define mmCM1_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_START_CNTL_G 0x0ea7 +#define mmCM1_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_START_CNTL_R 0x0ea8 +#define mmCM1_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0ea9 +#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0eaa +#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0eab +#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B 0x0eac +#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B 0x0ead +#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G 0x0eae +#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G 0x0eaf +#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R 0x0eb0 +#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R 0x0eb1 +#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_REGION_0_1 0x0eb2 +#define mmCM1_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_REGION_2_3 0x0eb3 +#define mmCM1_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_REGION_4_5 0x0eb4 +#define mmCM1_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_REGION_6_7 0x0eb5 +#define mmCM1_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_REGION_8_9 0x0eb6 +#define mmCM1_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2 +#define 
mmCM1_CM_DGAM_RAMA_REGION_10_11 0x0eb7 +#define mmCM1_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_REGION_12_13 0x0eb8 +#define mmCM1_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMA_REGION_14_15 0x0eb9 +#define mmCM1_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_START_CNTL_B 0x0eba +#define mmCM1_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_START_CNTL_G 0x0ebb +#define mmCM1_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_START_CNTL_R 0x0ebc +#define mmCM1_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0ebd +#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0ebe +#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0ebf +#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B 0x0ec0 +#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B 0x0ec1 +#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G 0x0ec2 +#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G 0x0ec3 +#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R 0x0ec4 +#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R 0x0ec5 +#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_0_1 0x0ec6 +#define mmCM1_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_2_3 0x0ec7 +#define mmCM1_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_4_5 0x0ec8 +#define mmCM1_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_6_7 0x0ec9 +#define mmCM1_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_8_9 0x0eca +#define mmCM1_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_10_11 0x0ecb +#define mmCM1_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_12_13 0x0ecc +#define mmCM1_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM1_CM_DGAM_RAMB_REGION_14_15 0x0ecd +#define mmCM1_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_CONTROL 0x0ece +#define mmCM1_CM_BLNDGAM_CONTROL_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_LUT_INDEX 0x0ecf +#define mmCM1_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_LUT_DATA 0x0ed0 +#define mmCM1_CM_BLNDGAM_LUT_DATA_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0ed1 +#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B 0x0ed2 +#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G 0x0ed3 +#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R 0x0ed4 +#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0ed5 +#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0ed6 +#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0ed7 +#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0ed8 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0ed9 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0eda +#define 
mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0edb +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0edc +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0edd +#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1 0x0ede +#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3 0x0edf +#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5 0x0ee0 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7 0x0ee1 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9 0x0ee2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11 0x0ee3 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13 0x0ee4 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15 0x0ee5 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17 0x0ee6 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19 0x0ee7 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21 0x0ee8 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23 0x0ee9 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25 0x0eea +#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27 0x0eeb +#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29 0x0eec +#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31 0x0eed +#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33 0x0eee +#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B 0x0eef +#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G 0x0ef0 +#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R 0x0ef1 +#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0ef2 +#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0ef3 +#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0ef4 +#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0ef5 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0ef6 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0ef7 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0ef8 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0ef9 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0efa +#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1 0x0efb +#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3 0x0efc 
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5 0x0efd +#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7 0x0efe +#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9 0x0eff +#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11 0x0f00 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13 0x0f01 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15 0x0f02 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17 0x0f03 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19 0x0f04 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21 0x0f05 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23 0x0f06 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25 0x0f07 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27 0x0f08 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29 0x0f09 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31 0x0f0a +#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2 +#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33 0x0f0b +#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2 +#define mmCM1_CM_HDR_MULT_COEF 0x0f0c +#define mmCM1_CM_HDR_MULT_COEF_BASE_IDX 2 +#define mmCM1_CM_MEM_PWR_CTRL 0x0f0d +#define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2 +#define mmCM1_CM_MEM_PWR_STATUS 0x0f0e +#define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCM1_CM_DEALPHA 0x0f10 +#define mmCM1_CM_DEALPHA_BASE_IDX 2 +#define mmCM1_CM_COEF_FORMAT 0x0f11 +#define mmCM1_CM_COEF_FORMAT_BASE_IDX 2 +#define mmCM1_CM_SHAPER_CONTROL 0x0f12 +#define mmCM1_CM_SHAPER_CONTROL_BASE_IDX 2 +#define mmCM1_CM_SHAPER_OFFSET_R 0x0f13 +#define mmCM1_CM_SHAPER_OFFSET_R_BASE_IDX 2 +#define mmCM1_CM_SHAPER_OFFSET_G 0x0f14 +#define mmCM1_CM_SHAPER_OFFSET_G_BASE_IDX 2 +#define mmCM1_CM_SHAPER_OFFSET_B 0x0f15 +#define mmCM1_CM_SHAPER_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_SHAPER_SCALE_R 0x0f16 +#define mmCM1_CM_SHAPER_SCALE_R_BASE_IDX 2 +#define mmCM1_CM_SHAPER_SCALE_G_B 0x0f17 +#define mmCM1_CM_SHAPER_SCALE_G_B_BASE_IDX 2 +#define mmCM1_CM_SHAPER_LUT_INDEX 0x0f18 +#define mmCM1_CM_SHAPER_LUT_INDEX_BASE_IDX 2 +#define mmCM1_CM_SHAPER_LUT_DATA 0x0f19 +#define mmCM1_CM_SHAPER_LUT_DATA_BASE_IDX 2 +#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK 0x0f1a +#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B 0x0f1b +#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G 0x0f1c +#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R 0x0f1d +#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B 0x0f1e +#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G 0x0f1f +#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R 0x0f20 +#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_0_1 0x0f21 +#define mmCM1_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2 +#define 
mmCM1_CM_SHAPER_RAMA_REGION_2_3 0x0f22 +#define mmCM1_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_4_5 0x0f23 +#define mmCM1_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_6_7 0x0f24 +#define mmCM1_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_8_9 0x0f25 +#define mmCM1_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_10_11 0x0f26 +#define mmCM1_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_12_13 0x0f27 +#define mmCM1_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_14_15 0x0f28 +#define mmCM1_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_16_17 0x0f29 +#define mmCM1_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_18_19 0x0f2a +#define mmCM1_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_20_21 0x0f2b +#define mmCM1_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_22_23 0x0f2c +#define mmCM1_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_24_25 0x0f2d +#define mmCM1_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_26_27 0x0f2e +#define mmCM1_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_28_29 0x0f2f +#define mmCM1_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_30_31 0x0f30 +#define mmCM1_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMA_REGION_32_33 0x0f31 +#define mmCM1_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B 0x0f32 +#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G 0x0f33 +#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R 0x0f34 +#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B 0x0f35 +#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G 0x0f36 +#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R 0x0f37 +#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_0_1 0x0f38 +#define mmCM1_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_2_3 0x0f39 +#define mmCM1_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_4_5 0x0f3a +#define mmCM1_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_6_7 0x0f3b +#define mmCM1_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_8_9 0x0f3c +#define mmCM1_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_10_11 0x0f3d +#define mmCM1_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_12_13 0x0f3e +#define mmCM1_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_14_15 0x0f3f +#define mmCM1_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_16_17 0x0f40 +#define mmCM1_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_18_19 0x0f41 +#define mmCM1_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_20_21 0x0f42 +#define mmCM1_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_22_23 0x0f43 +#define mmCM1_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_24_25 0x0f44 +#define 
mmCM1_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_26_27 0x0f45 +#define mmCM1_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_28_29 0x0f46 +#define mmCM1_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_30_31 0x0f47 +#define mmCM1_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2 +#define mmCM1_CM_SHAPER_RAMB_REGION_32_33 0x0f48 +#define mmCM1_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2 +#define mmCM1_CM_MEM_PWR_CTRL2 0x0f49 +#define mmCM1_CM_MEM_PWR_CTRL2_BASE_IDX 2 +#define mmCM1_CM_MEM_PWR_STATUS2 0x0f4a +#define mmCM1_CM_MEM_PWR_STATUS2_BASE_IDX 2 +#define mmCM1_CM_3DLUT_MODE 0x0f4b +#define mmCM1_CM_3DLUT_MODE_BASE_IDX 2 +#define mmCM1_CM_3DLUT_INDEX 0x0f4c +#define mmCM1_CM_3DLUT_INDEX_BASE_IDX 2 +#define mmCM1_CM_3DLUT_DATA 0x0f4d +#define mmCM1_CM_3DLUT_DATA_BASE_IDX 2 +#define mmCM1_CM_3DLUT_DATA_30BIT 0x0f4e +#define mmCM1_CM_3DLUT_DATA_30BIT_BASE_IDX 2 +#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL 0x0f4f +#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2 +#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR 0x0f50 +#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2 +#define mmCM1_CM_3DLUT_OUT_OFFSET_R 0x0f51 +#define mmCM1_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2 +#define mmCM1_CM_3DLUT_OUT_OFFSET_G 0x0f52 +#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 +#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f53 +#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f54 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0f55 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec +// base address: 0xb58 +#define mmDPP_TOP2_DPP_CONTROL 0x0f9b +#define mmDPP_TOP2_DPP_CONTROL_BASE_IDX 2 +#define mmDPP_TOP2_DPP_SOFT_RESET 0x0f9c +#define mmDPP_TOP2_DPP_SOFT_RESET_BASE_IDX 2 +#define mmDPP_TOP2_DPP_CRC_VAL_R_G 0x0f9d +#define mmDPP_TOP2_DPP_CRC_VAL_R_G_BASE_IDX 2 +#define mmDPP_TOP2_DPP_CRC_VAL_B_A 0x0f9e +#define mmDPP_TOP2_DPP_CRC_VAL_B_A_BASE_IDX 2 +#define mmDPP_TOP2_DPP_CRC_CTRL 0x0f9f +#define mmDPP_TOP2_DPP_CRC_CTRL_BASE_IDX 2 + +// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec +// base address: 0xb58 +#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT 0x0fa5 +#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2 +#define mmCNVC_CFG2_FORMAT_CONTROL 0x0fa6 +#define mmCNVC_CFG2_FORMAT_CONTROL_BASE_IDX 2 +#define mmCNVC_CFG2_FCNV_FP_BIAS_R 0x0fa7 +#define mmCNVC_CFG2_FCNV_FP_BIAS_R_BASE_IDX 2 +#define mmCNVC_CFG2_FCNV_FP_BIAS_G 0x0fa8 +#define mmCNVC_CFG2_FCNV_FP_BIAS_G_BASE_IDX 2 +#define mmCNVC_CFG2_FCNV_FP_BIAS_B 0x0fa9 +#define mmCNVC_CFG2_FCNV_FP_BIAS_B_BASE_IDX 2 +#define mmCNVC_CFG2_FCNV_FP_SCALE_R 0x0faa +#define mmCNVC_CFG2_FCNV_FP_SCALE_R_BASE_IDX 2 +#define mmCNVC_CFG2_FCNV_FP_SCALE_G 0x0fab +#define mmCNVC_CFG2_FCNV_FP_SCALE_G_BASE_IDX 2 +#define mmCNVC_CFG2_FCNV_FP_SCALE_B 0x0fac +#define mmCNVC_CFG2_FCNV_FP_SCALE_B_BASE_IDX 2 +#define mmCNVC_CFG2_COLOR_KEYER_CONTROL 0x0fad +#define mmCNVC_CFG2_COLOR_KEYER_CONTROL_BASE_IDX 2 +#define mmCNVC_CFG2_COLOR_KEYER_ALPHA 0x0fae +#define mmCNVC_CFG2_COLOR_KEYER_ALPHA_BASE_IDX 2 +#define mmCNVC_CFG2_COLOR_KEYER_RED 0x0faf +#define mmCNVC_CFG2_COLOR_KEYER_RED_BASE_IDX 2 +#define mmCNVC_CFG2_COLOR_KEYER_GREEN 0x0fb0 +#define mmCNVC_CFG2_COLOR_KEYER_GREEN_BASE_IDX 2 +#define mmCNVC_CFG2_COLOR_KEYER_BLUE 0x0fb1 +#define mmCNVC_CFG2_COLOR_KEYER_BLUE_BASE_IDX 2 +#define mmCNVC_CFG2_ALPHA_2BIT_LUT 0x0fb3 +#define mmCNVC_CFG2_ALPHA_2BIT_LUT_BASE_IDX 2 + + +// addressBlock: 
dce_dc_dpp2_dispdec_cnvc_cur_dispdec +// base address: 0xb58 +#define mmCNVC_CUR2_CURSOR0_CONTROL 0x0fb6 +#define mmCNVC_CUR2_CURSOR0_CONTROL_BASE_IDX 2 +#define mmCNVC_CUR2_CURSOR0_COLOR0 0x0fb7 +#define mmCNVC_CUR2_CURSOR0_COLOR0_BASE_IDX 2 +#define mmCNVC_CUR2_CURSOR0_COLOR1 0x0fb8 +#define mmCNVC_CUR2_CURSOR0_COLOR1_BASE_IDX 2 +#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS 0x0fb9 +#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec +// base address: 0xb58 +#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT 0x0fc0 +#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2 +#define mmDSCL2_SCL_COEF_RAM_TAP_DATA 0x0fc1 +#define mmDSCL2_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2 +#define mmDSCL2_SCL_MODE 0x0fc2 +#define mmDSCL2_SCL_MODE_BASE_IDX 2 +#define mmDSCL2_SCL_TAP_CONTROL 0x0fc3 +#define mmDSCL2_SCL_TAP_CONTROL_BASE_IDX 2 +#define mmDSCL2_DSCL_CONTROL 0x0fc4 +#define mmDSCL2_DSCL_CONTROL_BASE_IDX 2 +#define mmDSCL2_DSCL_2TAP_CONTROL 0x0fc5 +#define mmDSCL2_DSCL_2TAP_CONTROL_BASE_IDX 2 +#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x0fc6 +#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2 +#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x0fc7 +#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2 +#define mmDSCL2_SCL_HORZ_FILTER_INIT 0x0fc8 +#define mmDSCL2_SCL_HORZ_FILTER_INIT_BASE_IDX 2 +#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0fc9 +#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2 +#define mmDSCL2_SCL_HORZ_FILTER_INIT_C 0x0fca +#define mmDSCL2_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2 +#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x0fcb +#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2 +#define mmDSCL2_SCL_VERT_FILTER_INIT 0x0fcc +#define mmDSCL2_SCL_VERT_FILTER_INIT_BASE_IDX 2 +#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT 0x0fcd +#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2 +#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C 0x0fce +#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2 +#define mmDSCL2_SCL_VERT_FILTER_INIT_C 0x0fcf +#define mmDSCL2_SCL_VERT_FILTER_INIT_C_BASE_IDX 2 +#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C 0x0fd0 +#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2 +#define mmDSCL2_SCL_BLACK_OFFSET 0x0fd1 +#define mmDSCL2_SCL_BLACK_OFFSET_BASE_IDX 2 +#define mmDSCL2_DSCL_UPDATE 0x0fd2 +#define mmDSCL2_DSCL_UPDATE_BASE_IDX 2 +#define mmDSCL2_DSCL_AUTOCAL 0x0fd3 +#define mmDSCL2_DSCL_AUTOCAL_BASE_IDX 2 +#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0fd4 +#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2 +#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0fd5 +#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2 +#define mmDSCL2_OTG_H_BLANK 0x0fd6 +#define mmDSCL2_OTG_H_BLANK_BASE_IDX 2 +#define mmDSCL2_OTG_V_BLANK 0x0fd7 +#define mmDSCL2_OTG_V_BLANK_BASE_IDX 2 +#define mmDSCL2_RECOUT_START 0x0fd8 +#define mmDSCL2_RECOUT_START_BASE_IDX 2 +#define mmDSCL2_RECOUT_SIZE 0x0fd9 +#define mmDSCL2_RECOUT_SIZE_BASE_IDX 2 +#define mmDSCL2_MPC_SIZE 0x0fda +#define mmDSCL2_MPC_SIZE_BASE_IDX 2 +#define mmDSCL2_LB_DATA_FORMAT 0x0fdb +#define mmDSCL2_LB_DATA_FORMAT_BASE_IDX 2 +#define mmDSCL2_LB_MEMORY_CTRL 0x0fdc +#define mmDSCL2_LB_MEMORY_CTRL_BASE_IDX 2 +#define mmDSCL2_LB_V_COUNTER 0x0fdd +#define mmDSCL2_LB_V_COUNTER_BASE_IDX 2 +#define mmDSCL2_DSCL_MEM_PWR_CTRL 0x0fde +#define mmDSCL2_DSCL_MEM_PWR_CTRL_BASE_IDX 2 +#define mmDSCL2_DSCL_MEM_PWR_STATUS 0x0fdf +#define mmDSCL2_DSCL_MEM_PWR_STATUS_BASE_IDX 2 +#define mmDSCL2_OBUF_CONTROL 0x0fe0 +#define mmDSCL2_OBUF_CONTROL_BASE_IDX 2 +#define mmDSCL2_OBUF_MEM_PWR_CTRL 0x0fe1 
+#define mmDSCL2_OBUF_MEM_PWR_CTRL_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec +// base address: 0xb58 +#define mmCM2_CM_CONTROL 0x0ff0 +#define mmCM2_CM_CONTROL_BASE_IDX 2 +#define mmCM2_CM_ICSC_CONTROL 0x0ff1 +#define mmCM2_CM_ICSC_CONTROL_BASE_IDX 2 +#define mmCM2_CM_ICSC_C11_C12 0x0ff2 +#define mmCM2_CM_ICSC_C11_C12_BASE_IDX 2 +#define mmCM2_CM_ICSC_C13_C14 0x0ff3 +#define mmCM2_CM_ICSC_C13_C14_BASE_IDX 2 +#define mmCM2_CM_ICSC_C21_C22 0x0ff4 +#define mmCM2_CM_ICSC_C21_C22_BASE_IDX 2 +#define mmCM2_CM_ICSC_C23_C24 0x0ff5 +#define mmCM2_CM_ICSC_C23_C24_BASE_IDX 2 +#define mmCM2_CM_ICSC_C31_C32 0x0ff6 +#define mmCM2_CM_ICSC_C31_C32_BASE_IDX 2 +#define mmCM2_CM_ICSC_C33_C34 0x0ff7 +#define mmCM2_CM_ICSC_C33_C34_BASE_IDX 2 +#define mmCM2_CM_ICSC_B_C11_C12 0x0ff8 +#define mmCM2_CM_ICSC_B_C11_C12_BASE_IDX 2 +#define mmCM2_CM_ICSC_B_C13_C14 0x0ff9 +#define mmCM2_CM_ICSC_B_C13_C14_BASE_IDX 2 +#define mmCM2_CM_ICSC_B_C21_C22 0x0ffa +#define mmCM2_CM_ICSC_B_C21_C22_BASE_IDX 2 +#define mmCM2_CM_ICSC_B_C23_C24 0x0ffb +#define mmCM2_CM_ICSC_B_C23_C24_BASE_IDX 2 +#define mmCM2_CM_ICSC_B_C31_C32 0x0ffc +#define mmCM2_CM_ICSC_B_C31_C32_BASE_IDX 2 +#define mmCM2_CM_ICSC_B_C33_C34 0x0ffd +#define mmCM2_CM_ICSC_B_C33_C34_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_CONTROL 0x0ffe +#define mmCM2_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_C11_C12 0x0fff +#define mmCM2_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_C13_C14 0x1000 +#define mmCM2_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_C21_C22 0x1001 +#define mmCM2_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_C23_C24 0x1002 +#define mmCM2_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_C31_C32 0x1003 +#define mmCM2_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_C33_C34 0x1004 +#define mmCM2_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_B_C11_C12 0x1005 +#define mmCM2_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_B_C13_C14 0x1006 +#define mmCM2_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_B_C21_C22 0x1007 +#define mmCM2_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_B_C23_C24 0x1008 +#define mmCM2_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_B_C31_C32 0x1009 +#define mmCM2_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2 +#define mmCM2_CM_GAMUT_REMAP_B_C33_C34 0x100a +#define mmCM2_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2 +#define mmCM2_CM_BIAS_CR_R 0x100b +#define mmCM2_CM_BIAS_CR_R_BASE_IDX 2 +#define mmCM2_CM_BIAS_Y_G_CB_B 0x100c +#define mmCM2_CM_BIAS_Y_G_CB_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_CONTROL 0x100d +#define mmCM2_CM_DGAM_CONTROL_BASE_IDX 2 +#define mmCM2_CM_DGAM_LUT_INDEX 0x100e +#define mmCM2_CM_DGAM_LUT_INDEX_BASE_IDX 2 +#define mmCM2_CM_DGAM_LUT_DATA 0x100f +#define mmCM2_CM_DGAM_LUT_DATA_BASE_IDX 2 +#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK 0x1010 +#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_START_CNTL_B 0x1011 +#define mmCM2_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_START_CNTL_G 0x1012 +#define mmCM2_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_START_CNTL_R 0x1013 +#define mmCM2_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B 0x1014 +#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1015 +#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1016 
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B 0x1017 +#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B 0x1018 +#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G 0x1019 +#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G 0x101a +#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R 0x101b +#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R 0x101c +#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_0_1 0x101d +#define mmCM2_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_2_3 0x101e +#define mmCM2_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_4_5 0x101f +#define mmCM2_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_6_7 0x1020 +#define mmCM2_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_8_9 0x1021 +#define mmCM2_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_10_11 0x1022 +#define mmCM2_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_12_13 0x1023 +#define mmCM2_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMA_REGION_14_15 0x1024 +#define mmCM2_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_START_CNTL_B 0x1025 +#define mmCM2_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_START_CNTL_G 0x1026 +#define mmCM2_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_START_CNTL_R 0x1027 +#define mmCM2_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1028 +#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1029 +#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R 0x102a +#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B 0x102b +#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B 0x102c +#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G 0x102d +#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G 0x102e +#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R 0x102f +#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R 0x1030 +#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_0_1 0x1031 +#define mmCM2_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_2_3 0x1032 +#define mmCM2_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_4_5 0x1033 +#define mmCM2_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_6_7 0x1034 +#define mmCM2_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_8_9 0x1035 +#define mmCM2_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_10_11 0x1036 +#define mmCM2_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_12_13 0x1037 +#define mmCM2_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM2_CM_DGAM_RAMB_REGION_14_15 0x1038 +#define mmCM2_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_CONTROL 0x1039 +#define mmCM2_CM_BLNDGAM_CONTROL_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_LUT_INDEX 0x103a +#define 
mmCM2_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_LUT_DATA 0x103b +#define mmCM2_CM_BLNDGAM_LUT_DATA_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x103c +#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B 0x103d +#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G 0x103e +#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R 0x103f +#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x1040 +#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x1041 +#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x1042 +#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B 0x1043 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B 0x1044 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G 0x1045 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G 0x1046 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R 0x1047 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R 0x1048 +#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1 0x1049 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3 0x104a +#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5 0x104b +#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7 0x104c +#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9 0x104d +#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11 0x104e +#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13 0x104f +#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15 0x1050 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17 0x1051 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19 0x1052 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21 0x1053 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23 0x1054 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25 0x1055 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27 0x1056 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29 0x1057 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31 0x1058 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33 0x1059 +#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B 0x105a +#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G 0x105b +#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R 0x105c +#define 
mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x105d +#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x105e +#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x105f +#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B 0x1060 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B 0x1061 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G 0x1062 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G 0x1063 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R 0x1064 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R 0x1065 +#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1 0x1066 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3 0x1067 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5 0x1068 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7 0x1069 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9 0x106a +#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11 0x106b +#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13 0x106c +#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15 0x106d +#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17 0x106e +#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19 0x106f +#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21 0x1070 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23 0x1071 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25 0x1072 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27 0x1073 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29 0x1074 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31 0x1075 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33 0x1076 +#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2 +#define mmCM2_CM_HDR_MULT_COEF 0x1077 +#define mmCM2_CM_HDR_MULT_COEF_BASE_IDX 2 +#define mmCM2_CM_MEM_PWR_CTRL 0x1078 +#define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2 +#define mmCM2_CM_MEM_PWR_STATUS 0x1079 +#define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCM2_CM_DEALPHA 0x107b +#define mmCM2_CM_DEALPHA_BASE_IDX 2 +#define mmCM2_CM_COEF_FORMAT 0x107c +#define mmCM2_CM_COEF_FORMAT_BASE_IDX 2 +#define mmCM2_CM_SHAPER_CONTROL 0x107d +#define mmCM2_CM_SHAPER_CONTROL_BASE_IDX 2 +#define mmCM2_CM_SHAPER_OFFSET_R 0x107e +#define mmCM2_CM_SHAPER_OFFSET_R_BASE_IDX 2 +#define mmCM2_CM_SHAPER_OFFSET_G 0x107f +#define mmCM2_CM_SHAPER_OFFSET_G_BASE_IDX 2 +#define mmCM2_CM_SHAPER_OFFSET_B 0x1080 +#define mmCM2_CM_SHAPER_OFFSET_B_BASE_IDX 2 +#define mmCM2_CM_SHAPER_SCALE_R 0x1081 +#define 
mmCM2_CM_SHAPER_SCALE_R_BASE_IDX 2 +#define mmCM2_CM_SHAPER_SCALE_G_B 0x1082 +#define mmCM2_CM_SHAPER_SCALE_G_B_BASE_IDX 2 +#define mmCM2_CM_SHAPER_LUT_INDEX 0x1083 +#define mmCM2_CM_SHAPER_LUT_INDEX_BASE_IDX 2 +#define mmCM2_CM_SHAPER_LUT_DATA 0x1084 +#define mmCM2_CM_SHAPER_LUT_DATA_BASE_IDX 2 +#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK 0x1085 +#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B 0x1086 +#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G 0x1087 +#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R 0x1088 +#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B 0x1089 +#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G 0x108a +#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R 0x108b +#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_0_1 0x108c +#define mmCM2_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_2_3 0x108d +#define mmCM2_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_4_5 0x108e +#define mmCM2_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_6_7 0x108f +#define mmCM2_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_8_9 0x1090 +#define mmCM2_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_10_11 0x1091 +#define mmCM2_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_12_13 0x1092 +#define mmCM2_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_14_15 0x1093 +#define mmCM2_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_16_17 0x1094 +#define mmCM2_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_18_19 0x1095 +#define mmCM2_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_20_21 0x1096 +#define mmCM2_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_22_23 0x1097 +#define mmCM2_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_24_25 0x1098 +#define mmCM2_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_26_27 0x1099 +#define mmCM2_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_28_29 0x109a +#define mmCM2_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_30_31 0x109b +#define mmCM2_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMA_REGION_32_33 0x109c +#define mmCM2_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B 0x109d +#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G 0x109e +#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R 0x109f +#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B 0x10a0 +#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G 0x10a1 +#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R 0x10a2 +#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_0_1 0x10a3 +#define mmCM2_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_2_3 0x10a4 +#define 
mmCM2_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_4_5 0x10a5 +#define mmCM2_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_6_7 0x10a6 +#define mmCM2_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_8_9 0x10a7 +#define mmCM2_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_10_11 0x10a8 +#define mmCM2_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_12_13 0x10a9 +#define mmCM2_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_14_15 0x10aa +#define mmCM2_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_16_17 0x10ab +#define mmCM2_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_18_19 0x10ac +#define mmCM2_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_20_21 0x10ad +#define mmCM2_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_22_23 0x10ae +#define mmCM2_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_24_25 0x10af +#define mmCM2_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_26_27 0x10b0 +#define mmCM2_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_28_29 0x10b1 +#define mmCM2_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_30_31 0x10b2 +#define mmCM2_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2 +#define mmCM2_CM_SHAPER_RAMB_REGION_32_33 0x10b3 +#define mmCM2_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2 +#define mmCM2_CM_MEM_PWR_CTRL2 0x10b4 +#define mmCM2_CM_MEM_PWR_CTRL2_BASE_IDX 2 +#define mmCM2_CM_MEM_PWR_STATUS2 0x10b5 +#define mmCM2_CM_MEM_PWR_STATUS2_BASE_IDX 2 +#define mmCM2_CM_3DLUT_MODE 0x10b6 +#define mmCM2_CM_3DLUT_MODE_BASE_IDX 2 +#define mmCM2_CM_3DLUT_INDEX 0x10b7 +#define mmCM2_CM_3DLUT_INDEX_BASE_IDX 2 +#define mmCM2_CM_3DLUT_DATA 0x10b8 +#define mmCM2_CM_3DLUT_DATA_BASE_IDX 2 +#define mmCM2_CM_3DLUT_DATA_30BIT 0x10b9 +#define mmCM2_CM_3DLUT_DATA_30BIT_BASE_IDX 2 +#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL 0x10ba +#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2 +#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR 0x10bb +#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2 +#define mmCM2_CM_3DLUT_OUT_OFFSET_R 0x10bc +#define mmCM2_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2 +#define mmCM2_CM_3DLUT_OUT_OFFSET_G 0x10bd +#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 +#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10be +#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_INDEX 0x10bf +#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_DATA 0x10c0 +#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 + +// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec +// base address: 0x1104 +#define mmDPP_TOP3_DPP_CONTROL 0x1106 +#define mmDPP_TOP3_DPP_CONTROL_BASE_IDX 2 +#define mmDPP_TOP3_DPP_SOFT_RESET 0x1107 +#define mmDPP_TOP3_DPP_SOFT_RESET_BASE_IDX 2 +#define mmDPP_TOP3_DPP_CRC_VAL_R_G 0x1108 +#define mmDPP_TOP3_DPP_CRC_VAL_R_G_BASE_IDX 2 +#define mmDPP_TOP3_DPP_CRC_VAL_B_A 0x1109 +#define mmDPP_TOP3_DPP_CRC_VAL_B_A_BASE_IDX 2 +#define mmDPP_TOP3_DPP_CRC_CTRL 0x110a +#define mmDPP_TOP3_DPP_CRC_CTRL_BASE_IDX 2 + + +// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec +// base address: 0x1104 +#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT 0x1110 +#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2 +#define mmCNVC_CFG3_FORMAT_CONTROL 0x1111 +#define mmCNVC_CFG3_FORMAT_CONTROL_BASE_IDX 2 +#define 
mmCNVC_CFG3_FCNV_FP_BIAS_R 0x1112
+#define mmCNVC_CFG3_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G 0x1113
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B 0x1114
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R 0x1115
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G 0x1116
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B 0x1117
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL 0x1118
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA 0x1119
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_RED 0x111a
+#define mmCNVC_CFG3_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN 0x111b
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE 0x111c
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT 0x111e
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+// base address: 0x1104
+#define mmCNVC_CUR3_CURSOR0_CONTROL 0x1121
+#define mmCNVC_CUR3_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR0 0x1122
+#define mmCNVC_CUR3_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR1 0x1123
+#define mmCNVC_CUR3_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS 0x1124
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+// base address: 0x1104
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT 0x112b
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA 0x112c
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL3_SCL_MODE 0x112d
+#define mmDSCL3_SCL_MODE_BASE_IDX 2
+#define mmDSCL3_SCL_TAP_CONTROL 0x112e
+#define mmDSCL3_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_CONTROL 0x112f
+#define mmDSCL3_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_2TAP_CONTROL 0x1130
+#define mmDSCL3_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x1131
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x1132
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT 0x1133
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C 0x1134
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C 0x1135
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x1136
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT 0x1137
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT 0x1138
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C 0x1139
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C 0x113a
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C 0x113b
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL3_SCL_BLACK_OFFSET 0x113c
+#define mmDSCL3_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL3_DSCL_UPDATE 0x113d
+#define mmDSCL3_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL3_DSCL_AUTOCAL 0x113e
+#define mmDSCL3_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x113f
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x1140
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL3_OTG_H_BLANK 0x1141
+#define mmDSCL3_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL3_OTG_V_BLANK 0x1142
+#define mmDSCL3_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL3_RECOUT_START 0x1143
+#define mmDSCL3_RECOUT_START_BASE_IDX 2
+#define mmDSCL3_RECOUT_SIZE 0x1144
+#define mmDSCL3_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL3_MPC_SIZE 0x1145
+#define mmDSCL3_MPC_SIZE_BASE_IDX 2
+#define mmDSCL3_LB_DATA_FORMAT 0x1146
+#define mmDSCL3_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL3_LB_MEMORY_CTRL 0x1147
+#define mmDSCL3_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL3_LB_V_COUNTER 0x1148
+#define mmDSCL3_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_CTRL 0x1149
+#define mmDSCL3_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_STATUS 0x114a
+#define mmDSCL3_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL3_OBUF_CONTROL 0x114b
+#define mmDSCL3_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL3_OBUF_MEM_PWR_CTRL 0x114c
+#define mmDSCL3_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+// base address: 0x1104
+#define mmCM3_CM_CONTROL 0x115b
+#define mmCM3_CM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_CONTROL 0x115c
+#define mmCM3_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_C11_C12 0x115d
+#define mmCM3_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_C13_C14 0x115e
+#define mmCM3_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_C21_C22 0x115f
+#define mmCM3_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_C23_C24 0x1160
+#define mmCM3_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_C31_C32 0x1161
+#define mmCM3_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_C33_C34 0x1162
+#define mmCM3_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C11_C12 0x1163
+#define mmCM3_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C13_C14 0x1164
+#define mmCM3_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C21_C22 0x1165
+#define mmCM3_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C23_C24 0x1166
+#define mmCM3_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C31_C32 0x1167
+#define mmCM3_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C33_C34 0x1168
+#define mmCM3_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_CONTROL 0x1169
+#define mmCM3_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C11_C12 0x116a
+#define mmCM3_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C13_C14 0x116b
+#define mmCM3_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C21_C22 0x116c
+#define mmCM3_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C23_C24 0x116d
+#define mmCM3_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C31_C32 0x116e
+#define mmCM3_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C33_C34 0x116f
+#define mmCM3_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12 0x1170
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14 0x1171
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22 0x1172
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24 0x1173
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32 0x1174
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34 0x1175
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_BIAS_CR_R 0x1176
+#define mmCM3_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM3_CM_BIAS_Y_G_CB_B 0x1177
+#define mmCM3_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_CONTROL 0x1178
+#define mmCM3_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_INDEX 0x1179
+#define mmCM3_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_DATA 0x117a
+#define mmCM3_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK 0x117b
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B 0x117c
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G 0x117d
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R 0x117e
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B 0x117f
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1180
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1181
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B 0x1182
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B 0x1183
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G 0x1184
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G 0x1185
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R 0x1186
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R 0x1187
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1 0x1188
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3 0x1189
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5 0x118a
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7 0x118b
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9 0x118c
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11 0x118d
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13 0x118e
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15 0x118f
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B 0x1190
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G 0x1191
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R 0x1192
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1193
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1194
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R 0x1195
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B 0x1196
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B 0x1197
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G 0x1198
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G 0x1199
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R 0x119a
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R 0x119b
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1 0x119c
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3 0x119d
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5 0x119e
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7 0x119f
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9 0x11a0
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11 0x11a1
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13 0x11a2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15 0x11a3
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_CONTROL 0x11a4
+#define mmCM3_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_INDEX 0x11a5
+#define mmCM3_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_DATA 0x11a6
+#define mmCM3_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x11a7
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B 0x11a8
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G 0x11a9
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R 0x11aa
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x11ab
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x11ac
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x11ad
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B 0x11ae
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B 0x11af
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G 0x11b0
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G 0x11b1
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R 0x11b2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R 0x11b3
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1 0x11b4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3 0x11b5
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5 0x11b6
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7 0x11b7
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9 0x11b8
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11 0x11b9
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13 0x11ba
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15 0x11bb
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17 0x11bc
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19 0x11bd
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21 0x11be
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23 0x11bf
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25 0x11c0
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27 0x11c1
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29 0x11c2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31 0x11c3
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33 0x11c4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B 0x11c5
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G 0x11c6
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R 0x11c7
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x11c8
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x11c9
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x11ca
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B 0x11cb
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B 0x11cc
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G 0x11cd
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G 0x11ce
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R 0x11cf
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R 0x11d0
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1 0x11d1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3 0x11d2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5 0x11d3
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7 0x11d4
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9 0x11d5
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11 0x11d6
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13 0x11d7
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15 0x11d8
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17 0x11d9
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19 0x11da
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21 0x11db
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23 0x11dc
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25 0x11dd
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27 0x11de
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29 0x11df
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31 0x11e0
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33 0x11e1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_HDR_MULT_COEF 0x11e2
+#define mmCM3_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL 0x11e3
+#define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS 0x11e4
+#define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM3_CM_DEALPHA 0x11e6
+#define mmCM3_CM_DEALPHA_BASE_IDX 2
+#define mmCM3_CM_COEF_FORMAT 0x11e7
+#define mmCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM3_CM_SHAPER_CONTROL 0x11e8
+#define mmCM3_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_R 0x11e9
+#define mmCM3_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_G 0x11ea
+#define mmCM3_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_B 0x11eb
+#define mmCM3_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_R 0x11ec
+#define mmCM3_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_G_B 0x11ed
+#define mmCM3_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_INDEX 0x11ee
+#define mmCM3_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_DATA 0x11ef
+#define mmCM3_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK 0x11f0
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B 0x11f1
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G 0x11f2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R 0x11f3
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B 0x11f4
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G 0x11f5
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R 0x11f6
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1 0x11f7
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3 0x11f8
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5 0x11f9
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7 0x11fa
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9 0x11fb
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11 0x11fc
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13 0x11fd
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15 0x11fe
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17 0x11ff
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19 0x1200
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21 0x1201
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23 0x1202
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25 0x1203
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27 0x1204
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29 0x1205
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31 0x1206
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33 0x1207
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B 0x1208
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G 0x1209
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R 0x120a
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B 0x120b
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G 0x120c
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R 0x120d
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1 0x120e
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3 0x120f
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5 0x1210
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7 0x1211
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9 0x1212
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11 0x1213
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13 0x1214
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15 0x1215
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17 0x1216
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19 0x1217
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21 0x1218
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23 0x1219
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25 0x121a
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27 0x121b
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29 0x121c
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31 0x121d
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33 0x121e
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL2 0x121f
+#define mmCM3_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS2 0x1220
+#define mmCM3_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM3_CM_3DLUT_MODE 0x1221
+#define mmCM3_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM3_CM_3DLUT_INDEX 0x1222
+#define mmCM3_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA 0x1223
+#define mmCM3_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA_30BIT 0x1224
+#define mmCM3_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL 0x1225
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR 0x1226
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R 0x1227
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G 0x1228
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1229
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x122a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x122b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+// base address: 0x0
+#define mmMPCC0_MPCC_TOP_SEL 0x1271
+#define mmMPCC0_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_SEL 0x1272
+#define mmMPCC0_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_OPP_ID 0x1273
+#define mmMPCC0_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC0_MPCC_CONTROL 0x1274
+#define mmMPCC0_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_SM_CONTROL 0x1275
+#define mmMPCC0_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL 0x1276
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_TOP_GAIN 0x1277
+#define mmMPCC0_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE 0x1278
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE 0x1279
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_R_CR 0x127a
+#define mmMPCC0_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_G_Y 0x127b
+#define mmMPCC0_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_B_CB 0x127c
+#define mmMPCC0_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC0_MPCC_MEM_PWR_CTRL 0x127d
+#define mmMPCC0_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC0_MPCC_STALL_STATUS 0x127e
+#define mmMPCC0_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC0_MPCC_STATUS 0x127f
+#define mmMPCC0_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+// base address: 0x6c
+#define mmMPCC1_MPCC_TOP_SEL 0x128c
+#define mmMPCC1_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_SEL 0x128d
+#define mmMPCC1_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_OPP_ID 0x128e
+#define mmMPCC1_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC1_MPCC_CONTROL 0x128f
+#define mmMPCC1_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_SM_CONTROL 0x1290
+#define mmMPCC1_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL 0x1291
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_TOP_GAIN 0x1292
+#define mmMPCC1_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE 0x1293
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE 0x1294
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_R_CR 0x1295
+#define mmMPCC1_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_G_Y 0x1296
+#define mmMPCC1_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_B_CB 0x1297
+#define mmMPCC1_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC1_MPCC_MEM_PWR_CTRL 0x1298
+#define mmMPCC1_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC1_MPCC_STALL_STATUS 0x1299
+#define mmMPCC1_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC1_MPCC_STATUS 0x129a
+#define mmMPCC1_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+// base address: 0xd8
+#define mmMPCC2_MPCC_TOP_SEL 0x12a7
+#define mmMPCC2_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_SEL 0x12a8
+#define mmMPCC2_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_OPP_ID 0x12a9
+#define mmMPCC2_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC2_MPCC_CONTROL 0x12aa
+#define mmMPCC2_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_SM_CONTROL 0x12ab
+#define mmMPCC2_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL 0x12ac
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_TOP_GAIN 0x12ad
+#define mmMPCC2_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE 0x12ae
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE 0x12af
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_R_CR 0x12b0
+#define mmMPCC2_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_G_Y 0x12b1
+#define mmMPCC2_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_B_CB 0x12b2
+#define mmMPCC2_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC2_MPCC_MEM_PWR_CTRL 0x12b3
+#define mmMPCC2_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC2_MPCC_STALL_STATUS 0x12b4
+#define mmMPCC2_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC2_MPCC_STATUS 0x12b5
+#define mmMPCC2_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+// base address: 0x144
+#define mmMPCC3_MPCC_TOP_SEL 0x12c2
+#define mmMPCC3_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_SEL 0x12c3
+#define mmMPCC3_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_OPP_ID 0x12c4
+#define mmMPCC3_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC3_MPCC_CONTROL 0x12c5
+#define mmMPCC3_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_SM_CONTROL 0x12c6
+#define mmMPCC3_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL 0x12c7
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_TOP_GAIN 0x12c8
+#define mmMPCC3_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE 0x12c9
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE 0x12ca
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_R_CR 0x12cb
+#define mmMPCC3_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_G_Y 0x12cc
+#define mmMPCC3_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_B_CB 0x12cd
+#define mmMPCC3_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC3_MPCC_MEM_PWR_CTRL 0x12ce
+#define mmMPCC3_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC3_MPCC_STALL_STATUS 0x12cf
+#define mmMPCC3_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC3_MPCC_STATUS 0x12d0
+#define mmMPCC3_MPCC_STATUS_BASE_IDX 2
+
+// addressBlock: dce_dc_mpc_mpcc4_dispdec
+// base address: 0x1b0
+#define mmMPCC4_MPCC_TOP_SEL 0x12dd
+#define mmMPCC4_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_SEL 0x12de
+#define mmMPCC4_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_OPP_ID 0x12df
+#define mmMPCC4_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC4_MPCC_CONTROL 0x12e0
+#define mmMPCC4_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_SM_CONTROL 0x12e1
+#define mmMPCC4_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL 0x12e2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_TOP_GAIN 0x12e3
+#define mmMPCC4_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE 0x12e4
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE 0x12e5
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_R_CR 0x12e6
+#define mmMPCC4_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_G_Y 0x12e7
+#define mmMPCC4_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_B_CB 0x12e8
+#define mmMPCC4_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC4_MPCC_MEM_PWR_CTRL 0x12e9
+#define mmMPCC4_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC4_MPCC_STALL_STATUS 0x12ea
+#define mmMPCC4_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC4_MPCC_STATUS 0x12eb
+#define mmMPCC4_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+// base address: 0x0
+#define mmMPC_CLOCK_CONTROL 0x1349
+#define mmMPC_CLOCK_CONTROL_BASE_IDX 2
+#define mmMPC_SOFT_RESET 0x134a
+#define mmMPC_SOFT_RESET_BASE_IDX 2
+#define mmMPC_CRC_CTRL 0x134b
+#define mmMPC_CRC_CTRL_BASE_IDX 2
+#define mmMPC_CRC_SEL_CONTROL 0x134c
+#define mmMPC_CRC_SEL_CONTROL_BASE_IDX 2
+#define mmMPC_CRC_RESULT_AR 0x134d
+#define mmMPC_CRC_RESULT_AR_BASE_IDX 2
+#define mmMPC_CRC_RESULT_GB 0x134e
+#define mmMPC_CRC_RESULT_GB_BASE_IDX 2
+#define mmMPC_CRC_RESULT_C 0x134f
+#define mmMPC_CRC_RESULT_C_BASE_IDX 2
+#define mmMPC_PERFMON_EVENT_CTRL 0x1352
+#define mmMPC_PERFMON_EVENT_CTRL_BASE_IDX 2
+#define mmMPC_BYPASS_BG_AR 0x1353
+#define mmMPC_BYPASS_BG_AR_BASE_IDX 2
+#define mmMPC_BYPASS_BG_GB 0x1354
+#define mmMPC_BYPASS_BG_GB_BASE_IDX 2
+#define mmMPC_STALL_GRACE_WINDOW 0x1355
+#define mmMPC_STALL_GRACE_WINDOW_BASE_IDX 2
+#define mmMPC_HOST_READ_CONTROL 0x1356
+#define mmMPC_HOST_READ_CONTROL_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0 0x135d
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET0 0x135e
+#define mmADR_CFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET0 0x135f
+#define mmADR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET0 0x1360
+#define mmCFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET0 0x1361
+#define mmCUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1 0x1362
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET1 0x1363
+#define mmADR_CFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET1 0x1364
+#define mmADR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET1 0x1365
+#define mmCFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET1 0x1366
+#define mmCUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2 0x1367
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET2 0x1368
+#define mmADR_CFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET2 0x1369
+#define mmADR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET2 0x136a
+#define mmCFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET2 0x136b
+#define mmCUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3 0x136c
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET3 0x136d
+#define mmADR_CFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET3 0x136e
+#define mmADR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET3 0x136f
+#define mmCFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET3 0x1370
+#define mmCUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmMPC_OUT0_MUX 0x1385
+#define mmMPC_OUT0_MUX_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CONTROL 0x1386
+#define mmMPC_OUT0_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y 0x1387
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB 0x1388
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB_BASE_IDX 2
+#define mmMPC_OUT1_MUX 0x1389
+#define mmMPC_OUT1_MUX_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CONTROL 0x138a
+#define mmMPC_OUT1_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y 0x138b
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB 0x138c
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+// base address: 0x0
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE 0x13ae
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX 0x13af
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA 0x13b0
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL 0x13b1
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B 0x13b2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G 0x13b3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R 0x13b4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13b5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13b6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13b7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B 0x13b8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B 0x13b9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G 0x13ba
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G 0x13bb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R 0x13bc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R 0x13bd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1 0x13be
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3 0x13bf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5 0x13c0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7 0x13c1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9 0x13c2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11 0x13c3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13 0x13c4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15 0x13c5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17 0x13c6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19 0x13c7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21 0x13c8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23 0x13c9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25 0x13ca
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27 0x13cb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29 0x13cc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31 0x13cd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33 0x13ce
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B 0x13cf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G 0x13d0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R 0x13d1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x13d2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x13d3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x13d4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B 0x13d5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B 0x13d6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G 0x13d7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G 0x13d8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R 0x13d9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R 0x13da
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1 0x13db
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3 0x13dc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5 0x13dd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7 0x13de
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9 0x13df
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11 0x13e0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13 0x13e1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15 0x13e2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17 0x13e3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19 0x13e4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21 0x13e5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23 0x13e6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25 0x13e7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27 0x13e8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29 0x13e9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31 0x13ea
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33 0x13eb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+// base address: 0x104
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE 0x13ef
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX 0x13f0
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA 0x13f1
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL 0x13f2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B 0x13f3
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G 0x13f4
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R 0x13f5
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13f6
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13f7
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13f8
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B 0x13f9
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B 0x13fa
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G 0x13fb
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G 0x13fc
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R 0x13fd
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R 0x13fe
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1 0x13ff
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3 0x1400
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5 0x1401
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7 0x1402
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9 0x1403
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11 0x1404
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13 0x1405
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15 0x1406
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17 0x1407
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19 0x1408
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21 0x1409
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23 0x140a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25 0x140b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27 0x140c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29 0x140d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31 0x140e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33 0x140f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B 0x1410
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G 0x1411
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R 0x1412
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1413
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1414
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1415
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B 0x1416
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B 0x1417
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G 0x1418
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G 0x1419
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R 0x141a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R 0x141b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 0x141c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 0x141d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 0x141e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 0x141f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 0x1420
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 0x1421
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 0x1422
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 0x1423
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 0x1424
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 0x1425
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 0x1426
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 0x1427
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 0x1428
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 0x1429
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 0x142a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 0x142b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 0x142c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+// base address: 0x208
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE 0x1430
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX 0x1431
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA 0x1432
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL 0x1433
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B 0x1434
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G 0x1435
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R 0x1436
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1437
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1438
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x1439
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B 0x143a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B 0x143b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G 0x143c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G 0x143d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R 0x143e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R 0x143f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 0x1440
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 0x1441
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 0x1442
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 0x1443
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 0x1444
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 0x1445
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 0x1446
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 0x1447
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 0x1448
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 0x1449
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 0x144a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 0x144b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 0x144c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 0x144d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 0x144e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 0x144f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 0x1450
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B 0x1451
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G 0x1452
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R 0x1453
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1454
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1455
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1456
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B 0x1457
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B 0x1458
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G 0x1459
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G 0x145a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R 0x145b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R 0x145c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 0x145d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 0x145e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 0x145f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 0x1460
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 0x1461
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 0x1462
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 0x1463
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 0x1464
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 0x1465
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 0x1466
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 0x1467
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 0x1468
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 0x1469
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 0x146a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 0x146b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 0x146c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 0x146d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+// base address: 0x30c
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE 0x1471
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX 0x1472
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA 0x1473
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL 0x1474
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B 0x1475
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G 0x1476
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R 0x1477
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1478
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1479
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x147a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B 0x147b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B 0x147c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G 0x147d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G 0x147e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R 0x147f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R 0x1480
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 0x1481
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 0x1482
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 0x1483
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 0x1484
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 0x1485
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 0x1486
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 0x1487
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 0x1488
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 0x1489
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 0x148a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 0x148b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 0x148c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 0x148d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 0x148e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 0x148f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 0x1490
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 0x1491
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B 0x1492
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G 0x1493
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R 0x1494
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1495
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1496
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1497
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B 0x1498
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B 0x1499
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G 0x149a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G 0x149b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R 0x149c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R 0x149d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 0x149e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 0x149f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 0x14a0
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 0x14a1
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 0x14a2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 0x14a3
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 0x14a4
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 0x14a5
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 0x14a6
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 0x14a7
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 0x14a8
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 0x14a9
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 0x14aa
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 0x14ab
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 0x14ac
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 0x14ad
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 0x14ae
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec
+// base address: 0x410
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE 0x14b2
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX 0x14b3
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA 0x14b4
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL 0x14b5
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B 0x14b6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G 0x14b7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R 0x14b8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x14b9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x14ba
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x14bb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B 0x14bc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B 0x14bd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G 0x14be
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G 0x14bf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R 0x14c0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R 0x14c1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1 0x14c2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3 0x14c3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5 0x14c4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7 0x14c5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9 0x14c6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11 0x14c7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13 0x14c8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15 0x14c9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17 0x14ca
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19 0x14cb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21 0x14cc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23 0x14cd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25 0x14ce
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27 0x14cf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29 0x14d0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31 0x14d1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33 0x14d2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B 0x14d3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G 0x14d4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R 0x14d5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x14d6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x14d7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x14d8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B 0x14d9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B 0x14da
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G 0x14db
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G 0x14dc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R 0x14dd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R 0x14de
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1 0x14df
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3 0x14e0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5 0x14e1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7 0x14e2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9 0x14e3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11 0x14e4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13 0x14e5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15 0x14e6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17 0x14e7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19 0x14e8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21 0x14e9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23 0x14ea
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25 0x14eb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27 0x14ec
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29 0x14ed
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31 0x14ee
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33 0x14ef
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+// base address: 0x0
+#define mmMPC_OUT_CSC_COEF_FORMAT 0x15b6
+#define mmMPC_OUT_CSC_COEF_FORMAT_BASE_IDX 2
+#define mmMPC_OUT0_CSC_MODE 0x15b7
+#define mmMPC_OUT0_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_A 0x15b8
+#define mmMPC_OUT0_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_A 0x15b9
+#define mmMPC_OUT0_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_A 0x15ba
+#define mmMPC_OUT0_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_A 0x15bb
+#define mmMPC_OUT0_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_A 0x15bc
+#define mmMPC_OUT0_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_A 0x15bd
+#define mmMPC_OUT0_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_B 0x15be
+#define mmMPC_OUT0_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_B 0x15bf
+#define mmMPC_OUT0_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_B 0x15c0
+#define mmMPC_OUT0_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_B 0x15c1
+#define mmMPC_OUT0_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_B 0x15c2
+#define mmMPC_OUT0_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_B 0x15c3
+#define mmMPC_OUT0_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_MODE 0x15c4
+#define mmMPC_OUT1_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_A 0x15c5
+#define mmMPC_OUT1_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_A 0x15c6
+#define mmMPC_OUT1_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_A 0x15c7
+#define mmMPC_OUT1_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_A 0x15c8
+#define mmMPC_OUT1_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_A 0x15c9
+#define mmMPC_OUT1_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_A 0x15ca
+#define mmMPC_OUT1_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_B 0x15cb
+#define mmMPC_OUT1_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_B 0x15cc
+#define mmMPC_OUT1_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_B 0x15cd
+#define mmMPC_OUT1_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_B 0x15ce
+#define mmMPC_OUT1_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_B 0x15cf
+#define mmMPC_OUT1_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_B 0x15d0
+#define mmMPC_OUT1_CSC_C33_C34_B_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+// base address: 0x0
+#define mmFMT0_FMT_CLAMP_COMPONENT_R 0x183c
+#define mmFMT0_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_G 0x183d
+#define mmFMT0_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_B 0x183e
+#define mmFMT0_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL 0x183f
+#define
mmFMT0_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2 +#define mmFMT0_FMT_CONTROL 0x1840 +#define mmFMT0_FMT_CONTROL_BASE_IDX 2 +#define mmFMT0_FMT_BIT_DEPTH_CONTROL 0x1841 +#define mmFMT0_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2 +#define mmFMT0_FMT_DITHER_RAND_R_SEED 0x1842 +#define mmFMT0_FMT_DITHER_RAND_R_SEED_BASE_IDX 2 +#define mmFMT0_FMT_DITHER_RAND_G_SEED 0x1843 +#define mmFMT0_FMT_DITHER_RAND_G_SEED_BASE_IDX 2 +#define mmFMT0_FMT_DITHER_RAND_B_SEED 0x1844 +#define mmFMT0_FMT_DITHER_RAND_B_SEED_BASE_IDX 2 +#define mmFMT0_FMT_CLAMP_CNTL 0x1845 +#define mmFMT0_FMT_CLAMP_CNTL_BASE_IDX 2 +#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1846 +#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2 +#define mmFMT0_FMT_MAP420_MEMORY_CONTROL 0x1847 +#define mmFMT0_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2 +#define mmFMT0_FMT_422_CONTROL 0x1849 +#define mmFMT0_FMT_422_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_dpg0_dispdec +// base address: 0x0 +#define mmDPG0_DPG_CONTROL 0x1854 +#define mmDPG0_DPG_CONTROL_BASE_IDX 2 +#define mmDPG0_DPG_RAMP_CONTROL 0x1855 +#define mmDPG0_DPG_RAMP_CONTROL_BASE_IDX 2 +#define mmDPG0_DPG_DIMENSIONS 0x1856 +#define mmDPG0_DPG_DIMENSIONS_BASE_IDX 2 +#define mmDPG0_DPG_COLOUR_R_CR 0x1857 +#define mmDPG0_DPG_COLOUR_R_CR_BASE_IDX 2 +#define mmDPG0_DPG_COLOUR_G_Y 0x1858 +#define mmDPG0_DPG_COLOUR_G_Y_BASE_IDX 2 +#define mmDPG0_DPG_COLOUR_B_CB 0x1859 +#define mmDPG0_DPG_COLOUR_B_CB_BASE_IDX 2 +#define mmDPG0_DPG_OFFSET_SEGMENT 0x185a +#define mmDPG0_DPG_OFFSET_SEGMENT_BASE_IDX 2 +#define mmDPG0_DPG_STATUS 0x185b +#define mmDPG0_DPG_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_oppbuf0_dispdec +// base address: 0x0 +#define mmOPPBUF0_OPPBUF_CONTROL 0x1884 +#define mmOPPBUF0_OPPBUF_CONTROL_BASE_IDX 2 +#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0 0x1885 +#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2 +#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1 0x1886 +#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_opp_pipe0_dispdec +// base address: 0x0 +#define mmOPP_PIPE0_OPP_PIPE_CONTROL 0x188c +#define mmOPP_PIPE0_OPP_PIPE_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec +// base address: 0x0 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL 0x1891 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_BASE_IDX 2 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK 0x1892 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_BASE_IDX 2 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0 0x1893 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0_BASE_IDX 2 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1 0x1894 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1_BASE_IDX 2 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2 0x1895 +#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_fmt1_dispdec +// base address: 0x168 +#define mmFMT1_FMT_CLAMP_COMPONENT_R 0x1896 +#define mmFMT1_FMT_CLAMP_COMPONENT_R_BASE_IDX 2 +#define mmFMT1_FMT_CLAMP_COMPONENT_G 0x1897 +#define mmFMT1_FMT_CLAMP_COMPONENT_G_BASE_IDX 2 +#define mmFMT1_FMT_CLAMP_COMPONENT_B 0x1898 +#define mmFMT1_FMT_CLAMP_COMPONENT_B_BASE_IDX 2 +#define mmFMT1_FMT_DYNAMIC_EXP_CNTL 0x1899 +#define mmFMT1_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2 +#define mmFMT1_FMT_CONTROL 0x189a +#define mmFMT1_FMT_CONTROL_BASE_IDX 2 +#define mmFMT1_FMT_BIT_DEPTH_CONTROL 0x189b +#define mmFMT1_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2 +#define mmFMT1_FMT_DITHER_RAND_R_SEED 0x189c +#define mmFMT1_FMT_DITHER_RAND_R_SEED_BASE_IDX 2 +#define mmFMT1_FMT_DITHER_RAND_G_SEED 0x189d +#define mmFMT1_FMT_DITHER_RAND_G_SEED_BASE_IDX 2 +#define 
mmFMT1_FMT_DITHER_RAND_B_SEED 0x189e +#define mmFMT1_FMT_DITHER_RAND_B_SEED_BASE_IDX 2 +#define mmFMT1_FMT_CLAMP_CNTL 0x189f +#define mmFMT1_FMT_CLAMP_CNTL_BASE_IDX 2 +#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18a0 +#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2 +#define mmFMT1_FMT_MAP420_MEMORY_CONTROL 0x18a1 +#define mmFMT1_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2 +#define mmFMT1_FMT_422_CONTROL 0x18a3 +#define mmFMT1_FMT_422_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_dpg1_dispdec +// base address: 0x168 +#define mmDPG1_DPG_CONTROL 0x18ae +#define mmDPG1_DPG_CONTROL_BASE_IDX 2 +#define mmDPG1_DPG_RAMP_CONTROL 0x18af +#define mmDPG1_DPG_RAMP_CONTROL_BASE_IDX 2 +#define mmDPG1_DPG_DIMENSIONS 0x18b0 +#define mmDPG1_DPG_DIMENSIONS_BASE_IDX 2 +#define mmDPG1_DPG_COLOUR_R_CR 0x18b1 +#define mmDPG1_DPG_COLOUR_R_CR_BASE_IDX 2 +#define mmDPG1_DPG_COLOUR_G_Y 0x18b2 +#define mmDPG1_DPG_COLOUR_G_Y_BASE_IDX 2 +#define mmDPG1_DPG_COLOUR_B_CB 0x18b3 +#define mmDPG1_DPG_COLOUR_B_CB_BASE_IDX 2 +#define mmDPG1_DPG_OFFSET_SEGMENT 0x18b4 +#define mmDPG1_DPG_OFFSET_SEGMENT_BASE_IDX 2 +#define mmDPG1_DPG_STATUS 0x18b5 +#define mmDPG1_DPG_STATUS_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_oppbuf1_dispdec +// base address: 0x168 +#define mmOPPBUF1_OPPBUF_CONTROL 0x18de +#define mmOPPBUF1_OPPBUF_CONTROL_BASE_IDX 2 +#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0 0x18df +#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2 +#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1 0x18e0 +#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_opp_pipe1_dispdec +// base address: 0x168 +#define mmOPP_PIPE1_OPP_PIPE_CONTROL 0x18e6 +#define mmOPP_PIPE1_OPP_PIPE_CONTROL_BASE_IDX 2 + +// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec +// base address: 0x168 +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL 0x18eb +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_BASE_IDX 2 +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK 0x18ec +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_BASE_IDX 2 +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0 0x18ed +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0_BASE_IDX 2 +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1 0x18ee +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1_BASE_IDX 2 +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2 0x18ef +#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2_BASE_IDX 2 + + +// addressBlock: dce_dc_opp_opp_top_dispdec +// base address: 0x0 +#define mmOPP_TOP_CLK_CONTROL 0x1a5e +#define mmOPP_TOP_CLK_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_optc_odm0_dispdec +// base address: 0x0 +#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca +#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2 +#define mmODM0_OPTC_DATA_SOURCE_SELECT 0x1acb +#define mmODM0_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2 +#define mmODM0_OPTC_DATA_FORMAT_CONTROL 0x1acc +#define mmODM0_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2 +#define mmODM0_OPTC_BYTES_PER_PIXEL 0x1acd +#define mmODM0_OPTC_BYTES_PER_PIXEL_BASE_IDX 2 +#define mmODM0_OPTC_WIDTH_CONTROL 0x1ace +#define mmODM0_OPTC_WIDTH_CONTROL_BASE_IDX 2 +#define mmODM0_OPTC_INPUT_CLOCK_CONTROL 0x1acf +#define mmODM0_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2 + +// addressBlock: dce_dc_optc_odm1_dispdec +// base address: 0x40 +#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada +#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2 +#define mmODM1_OPTC_DATA_SOURCE_SELECT 0x1adb +#define mmODM1_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2 +#define mmODM1_OPTC_DATA_FORMAT_CONTROL 0x1adc +#define mmODM1_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2 +#define mmODM1_OPTC_BYTES_PER_PIXEL 0x1add +#define 
mmODM1_OPTC_BYTES_PER_PIXEL_BASE_IDX 2 +#define mmODM1_OPTC_WIDTH_CONTROL 0x1ade +#define mmODM1_OPTC_WIDTH_CONTROL_BASE_IDX 2 +#define mmODM1_OPTC_INPUT_CLOCK_CONTROL 0x1adf +#define mmODM1_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_optc_otg0_dispdec +// base address: 0x0 +#define mmOTG0_OTG_H_TOTAL 0x1b2a +#define mmOTG0_OTG_H_TOTAL_BASE_IDX 2 +#define mmOTG0_OTG_H_BLANK_START_END 0x1b2b +#define mmOTG0_OTG_H_BLANK_START_END_BASE_IDX 2 +#define mmOTG0_OTG_H_SYNC_A 0x1b2c +#define mmOTG0_OTG_H_SYNC_A_BASE_IDX 2 +#define mmOTG0_OTG_H_SYNC_A_CNTL 0x1b2d +#define mmOTG0_OTG_H_SYNC_A_CNTL_BASE_IDX 2 +#define mmOTG0_OTG_H_TIMING_CNTL 0x1b2e +#define mmOTG0_OTG_H_TIMING_CNTL_BASE_IDX 2 +#define mmOTG0_OTG_V_TOTAL 0x1b2f +#define mmOTG0_OTG_V_TOTAL_BASE_IDX 2 +#define mmOTG0_OTG_V_TOTAL_MIN 0x1b30 +#define mmOTG0_OTG_V_TOTAL_MIN_BASE_IDX 2 +#define mmOTG0_OTG_V_TOTAL_MAX 0x1b31 +#define mmOTG0_OTG_V_TOTAL_MAX_BASE_IDX 2 +#define mmOTG0_OTG_V_TOTAL_MID 0x1b32 +#define mmOTG0_OTG_V_TOTAL_MID_BASE_IDX 2 +#define mmOTG0_OTG_V_TOTAL_CONTROL 0x1b33 +#define mmOTG0_OTG_V_TOTAL_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_V_TOTAL_INT_STATUS 0x1b34 +#define mmOTG0_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2 +#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS 0x1b35 +#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2 +#define mmOTG0_OTG_V_BLANK_START_END 0x1b36 +#define mmOTG0_OTG_V_BLANK_START_END_BASE_IDX 2 +#define mmOTG0_OTG_V_SYNC_A 0x1b37 +#define mmOTG0_OTG_V_SYNC_A_BASE_IDX 2 +#define mmOTG0_OTG_V_SYNC_A_CNTL 0x1b38 +#define mmOTG0_OTG_V_SYNC_A_CNTL_BASE_IDX 2 +#define mmOTG0_OTG_TRIGA_CNTL 0x1b39 +#define mmOTG0_OTG_TRIGA_CNTL_BASE_IDX 2 +#define mmOTG0_OTG_TRIGA_MANUAL_TRIG 0x1b3a +#define mmOTG0_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2 +#define mmOTG0_OTG_TRIGB_CNTL 0x1b3b +#define mmOTG0_OTG_TRIGB_CNTL_BASE_IDX 2 +#define mmOTG0_OTG_TRIGB_MANUAL_TRIG 0x1b3c +#define mmOTG0_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2 +#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL 0x1b3d +#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2 +#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE 0x1b3f +#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2 +#define mmOTG0_OTG_CONTROL 0x1b41 +#define mmOTG0_OTG_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_BLANK_CONTROL 0x1b42 +#define mmOTG0_OTG_BLANK_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_INTERLACE_CONTROL 0x1b44 +#define mmOTG0_OTG_INTERLACE_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_INTERLACE_STATUS 0x1b45 +#define mmOTG0_OTG_INTERLACE_STATUS_BASE_IDX 2 +#define mmOTG0_OTG_PIXEL_DATA_READBACK0 0x1b47 +#define mmOTG0_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2 +#define mmOTG0_OTG_PIXEL_DATA_READBACK1 0x1b48 +#define mmOTG0_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2 +#define mmOTG0_OTG_STATUS 0x1b49 +#define mmOTG0_OTG_STATUS_BASE_IDX 2 +#define mmOTG0_OTG_STATUS_POSITION 0x1b4a +#define mmOTG0_OTG_STATUS_POSITION_BASE_IDX 2 +#define mmOTG0_OTG_NOM_VERT_POSITION 0x1b4b +#define mmOTG0_OTG_NOM_VERT_POSITION_BASE_IDX 2 +#define mmOTG0_OTG_STATUS_FRAME_COUNT 0x1b4c +#define mmOTG0_OTG_STATUS_FRAME_COUNT_BASE_IDX 2 +#define mmOTG0_OTG_STATUS_VF_COUNT 0x1b4d +#define mmOTG0_OTG_STATUS_VF_COUNT_BASE_IDX 2 +#define mmOTG0_OTG_STATUS_HV_COUNT 0x1b4e +#define mmOTG0_OTG_STATUS_HV_COUNT_BASE_IDX 2 +#define mmOTG0_OTG_COUNT_CONTROL 0x1b4f +#define mmOTG0_OTG_COUNT_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_COUNT_RESET 0x1b50 +#define mmOTG0_OTG_COUNT_RESET_BASE_IDX 2 +#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1b51 +#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2 +#define mmOTG0_OTG_VERT_SYNC_CONTROL 0x1b52 +#define 
mmOTG0_OTG_VERT_SYNC_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_STEREO_STATUS 0x1b53 +#define mmOTG0_OTG_STEREO_STATUS_BASE_IDX 2 +#define mmOTG0_OTG_STEREO_CONTROL 0x1b54 +#define mmOTG0_OTG_STEREO_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_SNAPSHOT_STATUS 0x1b55 +#define mmOTG0_OTG_SNAPSHOT_STATUS_BASE_IDX 2 +#define mmOTG0_OTG_SNAPSHOT_CONTROL 0x1b56 +#define mmOTG0_OTG_SNAPSHOT_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_SNAPSHOT_POSITION 0x1b57 +#define mmOTG0_OTG_SNAPSHOT_POSITION_BASE_IDX 2 +#define mmOTG0_OTG_SNAPSHOT_FRAME 0x1b58 +#define mmOTG0_OTG_SNAPSHOT_FRAME_BASE_IDX 2 +#define mmOTG0_OTG_INTERRUPT_CONTROL 0x1b59 +#define mmOTG0_OTG_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_UPDATE_LOCK 0x1b5a +#define mmOTG0_OTG_UPDATE_LOCK_BASE_IDX 2 +#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL 0x1b5b +#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_MASTER_EN 0x1b5c +#define mmOTG0_OTG_MASTER_EN_BASE_IDX 2 +#define mmOTG0_OTG_BLANK_DATA_COLOR 0x1b5e +#define mmOTG0_OTG_BLANK_DATA_COLOR_BASE_IDX 2 +#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT 0x1b5f +#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2 +#define mmOTG0_OTG_BLACK_COLOR 0x1b60 +#define mmOTG0_OTG_BLACK_COLOR_BASE_IDX 2 +#define mmOTG0_OTG_BLACK_COLOR_EXT 0x1b61 +#define mmOTG0_OTG_BLACK_COLOR_EXT_BASE_IDX 2 +#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION 0x1b62 +#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2 +#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1b63 +#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION 0x1b64 +#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2 +#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1b65 +#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION 0x1b66 +#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2 +#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1b67 +#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC_CNTL 0x1b68 +#define mmOTG0_OTG_CRC_CNTL_BASE_IDX 2 +#define mmOTG0_OTG_CRC_CNTL2 0x1b69 +#define mmOTG0_OTG_CRC_CNTL2_BASE_IDX 2 +#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL 0x1b6a +#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL 0x1b6b +#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL 0x1b6c +#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL 0x1b6d +#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC0_DATA_RG 0x1b6e +#define mmOTG0_OTG_CRC0_DATA_RG_BASE_IDX 2 +#define mmOTG0_OTG_CRC0_DATA_B 0x1b6f +#define mmOTG0_OTG_CRC0_DATA_B_BASE_IDX 2 +#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL 0x1b70 +#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL 0x1b71 +#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL 0x1b72 +#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL 0x1b73 +#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_CRC1_DATA_RG 0x1b74 +#define mmOTG0_OTG_CRC1_DATA_RG_BASE_IDX 2 +#define mmOTG0_OTG_CRC1_DATA_B 0x1b75 +#define mmOTG0_OTG_CRC1_DATA_B_BASE_IDX 2 +#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK 0x1b7a +#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2 +#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1b7b +#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2 +#define 
mmOTG0_OTG_STATIC_SCREEN_CONTROL 0x1b82 +#define mmOTG0_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_3D_STRUCTURE_CONTROL 0x1b83 +#define mmOTG0_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_GSL_VSYNC_GAP 0x1b84 +#define mmOTG0_OTG_GSL_VSYNC_GAP_BASE_IDX 2 +#define mmOTG0_OTG_MASTER_UPDATE_MODE 0x1b85 +#define mmOTG0_OTG_MASTER_UPDATE_MODE_BASE_IDX 2 +#define mmOTG0_OTG_CLOCK_CONTROL 0x1b86 +#define mmOTG0_OTG_CLOCK_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_VSTARTUP_PARAM 0x1b87 +#define mmOTG0_OTG_VSTARTUP_PARAM_BASE_IDX 2 +#define mmOTG0_OTG_VUPDATE_PARAM 0x1b88 +#define mmOTG0_OTG_VUPDATE_PARAM_BASE_IDX 2 +#define mmOTG0_OTG_VREADY_PARAM 0x1b89 +#define mmOTG0_OTG_VREADY_PARAM_BASE_IDX 2 +#define mmOTG0_OTG_GLOBAL_SYNC_STATUS 0x1b8a +#define mmOTG0_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2 +#define mmOTG0_OTG_MASTER_UPDATE_LOCK 0x1b8b +#define mmOTG0_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2 +#define mmOTG0_OTG_GSL_CONTROL 0x1b8c +#define mmOTG0_OTG_GSL_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_GSL_WINDOW_X 0x1b8d +#define mmOTG0_OTG_GSL_WINDOW_X_BASE_IDX 2 +#define mmOTG0_OTG_GSL_WINDOW_Y 0x1b8e +#define mmOTG0_OTG_GSL_WINDOW_Y_BASE_IDX 2 +#define mmOTG0_OTG_VUPDATE_KEEPOUT 0x1b8f +#define mmOTG0_OTG_VUPDATE_KEEPOUT_BASE_IDX 2 +#define mmOTG0_OTG_GLOBAL_CONTROL0 0x1b90 +#define mmOTG0_OTG_GLOBAL_CONTROL0_BASE_IDX 2 +#define mmOTG0_OTG_GLOBAL_CONTROL1 0x1b91 +#define mmOTG0_OTG_GLOBAL_CONTROL1_BASE_IDX 2 +#define mmOTG0_OTG_GLOBAL_CONTROL2 0x1b92 +#define mmOTG0_OTG_GLOBAL_CONTROL2_BASE_IDX 2 +#define mmOTG0_OTG_GLOBAL_CONTROL3 0x1b93 +#define mmOTG0_OTG_GLOBAL_CONTROL3_BASE_IDX 2 +#define mmOTG0_OTG_TRIG_MANUAL_CONTROL 0x1b94 +#define mmOTG0_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_DRR_CONTROL 0x1b97 +#define mmOTG0_OTG_DRR_CONTROL_BASE_IDX 2 +#define mmOTG0_OTG_DSC_START_POSITION 0x1b99 +#define mmOTG0_OTG_DSC_START_POSITION_BASE_IDX 2 + +// addressBlock: dce_dc_optc_otg1_dispdec +// base address: 0x200 +#define mmOTG1_OTG_H_TOTAL 0x1baa +#define mmOTG1_OTG_H_TOTAL_BASE_IDX 2 +#define mmOTG1_OTG_H_BLANK_START_END 0x1bab +#define mmOTG1_OTG_H_BLANK_START_END_BASE_IDX 2 +#define mmOTG1_OTG_H_SYNC_A 0x1bac +#define mmOTG1_OTG_H_SYNC_A_BASE_IDX 2 +#define mmOTG1_OTG_H_SYNC_A_CNTL 0x1bad +#define mmOTG1_OTG_H_SYNC_A_CNTL_BASE_IDX 2 +#define mmOTG1_OTG_H_TIMING_CNTL 0x1bae +#define mmOTG1_OTG_H_TIMING_CNTL_BASE_IDX 2 +#define mmOTG1_OTG_V_TOTAL 0x1baf +#define mmOTG1_OTG_V_TOTAL_BASE_IDX 2 +#define mmOTG1_OTG_V_TOTAL_MIN 0x1bb0 +#define mmOTG1_OTG_V_TOTAL_MIN_BASE_IDX 2 +#define mmOTG1_OTG_V_TOTAL_MAX 0x1bb1 +#define mmOTG1_OTG_V_TOTAL_MAX_BASE_IDX 2 +#define mmOTG1_OTG_V_TOTAL_MID 0x1bb2 +#define mmOTG1_OTG_V_TOTAL_MID_BASE_IDX 2 +#define mmOTG1_OTG_V_TOTAL_CONTROL 0x1bb3 +#define mmOTG1_OTG_V_TOTAL_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_V_TOTAL_INT_STATUS 0x1bb4 +#define mmOTG1_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2 +#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS 0x1bb5 +#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2 +#define mmOTG1_OTG_V_BLANK_START_END 0x1bb6 +#define mmOTG1_OTG_V_BLANK_START_END_BASE_IDX 2 +#define mmOTG1_OTG_V_SYNC_A 0x1bb7 +#define mmOTG1_OTG_V_SYNC_A_BASE_IDX 2 +#define mmOTG1_OTG_V_SYNC_A_CNTL 0x1bb8 +#define mmOTG1_OTG_V_SYNC_A_CNTL_BASE_IDX 2 +#define mmOTG1_OTG_TRIGA_CNTL 0x1bb9 +#define mmOTG1_OTG_TRIGA_CNTL_BASE_IDX 2 +#define mmOTG1_OTG_TRIGA_MANUAL_TRIG 0x1bba +#define mmOTG1_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2 +#define mmOTG1_OTG_TRIGB_CNTL 0x1bbb +#define mmOTG1_OTG_TRIGB_CNTL_BASE_IDX 2 +#define mmOTG1_OTG_TRIGB_MANUAL_TRIG 0x1bbc +#define 
mmOTG1_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2 +#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL 0x1bbd +#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2 +#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE 0x1bbf +#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2 +#define mmOTG1_OTG_CONTROL 0x1bc1 +#define mmOTG1_OTG_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_BLANK_CONTROL 0x1bc2 +#define mmOTG1_OTG_BLANK_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_INTERLACE_CONTROL 0x1bc4 +#define mmOTG1_OTG_INTERLACE_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_INTERLACE_STATUS 0x1bc5 +#define mmOTG1_OTG_INTERLACE_STATUS_BASE_IDX 2 +#define mmOTG1_OTG_PIXEL_DATA_READBACK0 0x1bc7 +#define mmOTG1_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2 +#define mmOTG1_OTG_PIXEL_DATA_READBACK1 0x1bc8 +#define mmOTG1_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2 +#define mmOTG1_OTG_STATUS 0x1bc9 +#define mmOTG1_OTG_STATUS_BASE_IDX 2 +#define mmOTG1_OTG_STATUS_POSITION 0x1bca +#define mmOTG1_OTG_STATUS_POSITION_BASE_IDX 2 +#define mmOTG1_OTG_NOM_VERT_POSITION 0x1bcb +#define mmOTG1_OTG_NOM_VERT_POSITION_BASE_IDX 2 +#define mmOTG1_OTG_STATUS_FRAME_COUNT 0x1bcc +#define mmOTG1_OTG_STATUS_FRAME_COUNT_BASE_IDX 2 +#define mmOTG1_OTG_STATUS_VF_COUNT 0x1bcd +#define mmOTG1_OTG_STATUS_VF_COUNT_BASE_IDX 2 +#define mmOTG1_OTG_STATUS_HV_COUNT 0x1bce +#define mmOTG1_OTG_STATUS_HV_COUNT_BASE_IDX 2 +#define mmOTG1_OTG_COUNT_CONTROL 0x1bcf +#define mmOTG1_OTG_COUNT_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_COUNT_RESET 0x1bd0 +#define mmOTG1_OTG_COUNT_RESET_BASE_IDX 2 +#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bd1 +#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2 +#define mmOTG1_OTG_VERT_SYNC_CONTROL 0x1bd2 +#define mmOTG1_OTG_VERT_SYNC_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_STEREO_STATUS 0x1bd3 +#define mmOTG1_OTG_STEREO_STATUS_BASE_IDX 2 +#define mmOTG1_OTG_STEREO_CONTROL 0x1bd4 +#define mmOTG1_OTG_STEREO_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_SNAPSHOT_STATUS 0x1bd5 +#define mmOTG1_OTG_SNAPSHOT_STATUS_BASE_IDX 2 +#define mmOTG1_OTG_SNAPSHOT_CONTROL 0x1bd6 +#define mmOTG1_OTG_SNAPSHOT_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_SNAPSHOT_POSITION 0x1bd7 +#define mmOTG1_OTG_SNAPSHOT_POSITION_BASE_IDX 2 +#define mmOTG1_OTG_SNAPSHOT_FRAME 0x1bd8 +#define mmOTG1_OTG_SNAPSHOT_FRAME_BASE_IDX 2 +#define mmOTG1_OTG_INTERRUPT_CONTROL 0x1bd9 +#define mmOTG1_OTG_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_UPDATE_LOCK 0x1bda +#define mmOTG1_OTG_UPDATE_LOCK_BASE_IDX 2 +#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL 0x1bdb +#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_MASTER_EN 0x1bdc +#define mmOTG1_OTG_MASTER_EN_BASE_IDX 2 +#define mmOTG1_OTG_BLANK_DATA_COLOR 0x1bde +#define mmOTG1_OTG_BLANK_DATA_COLOR_BASE_IDX 2 +#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT 0x1bdf +#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2 +#define mmOTG1_OTG_BLACK_COLOR 0x1be0 +#define mmOTG1_OTG_BLACK_COLOR_BASE_IDX 2 +#define mmOTG1_OTG_BLACK_COLOR_EXT 0x1be1 +#define mmOTG1_OTG_BLACK_COLOR_EXT_BASE_IDX 2 +#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION 0x1be2 +#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2 +#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1be3 +#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION 0x1be4 +#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2 +#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1be5 +#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION 0x1be6 +#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2 +#define 
mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1be7 +#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC_CNTL 0x1be8 +#define mmOTG1_OTG_CRC_CNTL_BASE_IDX 2 +#define mmOTG1_OTG_CRC_CNTL2 0x1be9 +#define mmOTG1_OTG_CRC_CNTL2_BASE_IDX 2 +#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL 0x1bea +#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL 0x1beb +#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL 0x1bec +#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL 0x1bed +#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC0_DATA_RG 0x1bee +#define mmOTG1_OTG_CRC0_DATA_RG_BASE_IDX 2 +#define mmOTG1_OTG_CRC0_DATA_B 0x1bef +#define mmOTG1_OTG_CRC0_DATA_B_BASE_IDX 2 +#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL 0x1bf0 +#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL 0x1bf1 +#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL 0x1bf2 +#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL 0x1bf3 +#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_CRC1_DATA_RG 0x1bf4 +#define mmOTG1_OTG_CRC1_DATA_RG_BASE_IDX 2 +#define mmOTG1_OTG_CRC1_DATA_B 0x1bf5 +#define mmOTG1_OTG_CRC1_DATA_B_BASE_IDX 2 +#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK 0x1bfa +#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2 +#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1bfb +#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2 +#define mmOTG1_OTG_STATIC_SCREEN_CONTROL 0x1c02 +#define mmOTG1_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_3D_STRUCTURE_CONTROL 0x1c03 +#define mmOTG1_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_GSL_VSYNC_GAP 0x1c04 +#define mmOTG1_OTG_GSL_VSYNC_GAP_BASE_IDX 2 +#define mmOTG1_OTG_MASTER_UPDATE_MODE 0x1c05 +#define mmOTG1_OTG_MASTER_UPDATE_MODE_BASE_IDX 2 +#define mmOTG1_OTG_CLOCK_CONTROL 0x1c06 +#define mmOTG1_OTG_CLOCK_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_VSTARTUP_PARAM 0x1c07 +#define mmOTG1_OTG_VSTARTUP_PARAM_BASE_IDX 2 +#define mmOTG1_OTG_VUPDATE_PARAM 0x1c08 +#define mmOTG1_OTG_VUPDATE_PARAM_BASE_IDX 2 +#define mmOTG1_OTG_VREADY_PARAM 0x1c09 +#define mmOTG1_OTG_VREADY_PARAM_BASE_IDX 2 +#define mmOTG1_OTG_GLOBAL_SYNC_STATUS 0x1c0a +#define mmOTG1_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2 +#define mmOTG1_OTG_MASTER_UPDATE_LOCK 0x1c0b +#define mmOTG1_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2 +#define mmOTG1_OTG_GSL_CONTROL 0x1c0c +#define mmOTG1_OTG_GSL_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_GSL_WINDOW_X 0x1c0d +#define mmOTG1_OTG_GSL_WINDOW_X_BASE_IDX 2 +#define mmOTG1_OTG_GSL_WINDOW_Y 0x1c0e +#define mmOTG1_OTG_GSL_WINDOW_Y_BASE_IDX 2 +#define mmOTG1_OTG_VUPDATE_KEEPOUT 0x1c0f +#define mmOTG1_OTG_VUPDATE_KEEPOUT_BASE_IDX 2 +#define mmOTG1_OTG_GLOBAL_CONTROL0 0x1c10 +#define mmOTG1_OTG_GLOBAL_CONTROL0_BASE_IDX 2 +#define mmOTG1_OTG_GLOBAL_CONTROL1 0x1c11 +#define mmOTG1_OTG_GLOBAL_CONTROL1_BASE_IDX 2 +#define mmOTG1_OTG_GLOBAL_CONTROL2 0x1c12 +#define mmOTG1_OTG_GLOBAL_CONTROL2_BASE_IDX 2 +#define mmOTG1_OTG_GLOBAL_CONTROL3 0x1c13 +#define mmOTG1_OTG_GLOBAL_CONTROL3_BASE_IDX 2 +#define mmOTG1_OTG_TRIG_MANUAL_CONTROL 0x1c14 +#define mmOTG1_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_DRR_CONTROL 0x1c17 +#define mmOTG1_OTG_DRR_CONTROL_BASE_IDX 2 +#define mmOTG1_OTG_DSC_START_POSITION 0x1c19 +#define mmOTG1_OTG_DSC_START_POSITION_BASE_IDX 2 + + +// addressBlock: 
dce_dc_optc_optc_misc_dispdec +// base address: 0x0 +#define mmDWB_SOURCE_SELECT 0x1e2a +#define mmDWB_SOURCE_SELECT_BASE_IDX 2 +#define mmGSL_SOURCE_SELECT 0x1e2b +#define mmGSL_SOURCE_SELECT_BASE_IDX 2 +#define mmOPTC_CLOCK_CONTROL 0x1e2c +#define mmOPTC_CLOCK_CONTROL_BASE_IDX 2 + + +// addressBlock: dce_dc_dio_dout_i2c_dispdec +// base address: 0x0 +#define mmDC_I2C_CONTROL 0x1e98 +#define mmDC_I2C_CONTROL_BASE_IDX 2 +#define mmDC_I2C_ARBITRATION 0x1e99 +#define mmDC_I2C_ARBITRATION_BASE_IDX 2 +#define mmDC_I2C_INTERRUPT_CONTROL 0x1e9a +#define mmDC_I2C_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmDC_I2C_SW_STATUS 0x1e9b +#define mmDC_I2C_SW_STATUS_BASE_IDX 2 +#define mmDC_I2C_DDC1_HW_STATUS 0x1e9c +#define mmDC_I2C_DDC1_HW_STATUS_BASE_IDX 2 +#define mmDC_I2C_DDC2_HW_STATUS 0x1e9d +#define mmDC_I2C_DDC2_HW_STATUS_BASE_IDX 2 +#define mmDC_I2C_DDC1_SPEED 0x1ea2 +#define mmDC_I2C_DDC1_SPEED_BASE_IDX 2 +#define mmDC_I2C_DDC1_SETUP 0x1ea3 +#define mmDC_I2C_DDC1_SETUP_BASE_IDX 2 +#define mmDC_I2C_DDC2_SPEED 0x1ea4 +#define mmDC_I2C_DDC2_SPEED_BASE_IDX 2 +#define mmDC_I2C_DDC2_SETUP 0x1ea5 +#define mmDC_I2C_DDC2_SETUP_BASE_IDX 2 +#define mmDC_I2C_TRANSACTION0 0x1eae +#define mmDC_I2C_TRANSACTION0_BASE_IDX 2 +#define mmDC_I2C_TRANSACTION1 0x1eaf +#define mmDC_I2C_TRANSACTION1_BASE_IDX 2 +#define mmDC_I2C_TRANSACTION2 0x1eb0 +#define mmDC_I2C_TRANSACTION2_BASE_IDX 2 +#define mmDC_I2C_TRANSACTION3 0x1eb1 +#define mmDC_I2C_TRANSACTION3_BASE_IDX 2 +#define mmDC_I2C_DATA 0x1eb2 +#define mmDC_I2C_DATA_BASE_IDX 2 +#define mmDC_I2C_EDID_DETECT_CTRL 0x1eb6 +#define mmDC_I2C_EDID_DETECT_CTRL_BASE_IDX 2 +#define mmDC_I2C_READ_REQUEST_INTERRUPT 0x1eb7 +#define mmDC_I2C_READ_REQUEST_INTERRUPT_BASE_IDX 2 + + +// addressBlock: dce_dc_dio_dio_misc_dispdec +// base address: 0x0 +#define mmDIO_SCRATCH0 0x1eca +#define mmDIO_SCRATCH0_BASE_IDX 2 +#define mmDIO_SCRATCH1 0x1ecb +#define mmDIO_SCRATCH1_BASE_IDX 2 +#define mmDIO_SCRATCH2 0x1ecc +#define mmDIO_SCRATCH2_BASE_IDX 2 +#define mmDIO_SCRATCH3 0x1ecd +#define mmDIO_SCRATCH3_BASE_IDX 2 +#define mmDIO_SCRATCH4 0x1ece +#define mmDIO_SCRATCH4_BASE_IDX 2 +#define mmDIO_SCRATCH5 0x1ecf +#define mmDIO_SCRATCH5_BASE_IDX 2 +#define mmDIO_SCRATCH6 0x1ed0 +#define mmDIO_SCRATCH6_BASE_IDX 2 +#define mmDIO_SCRATCH7 0x1ed1 +#define mmDIO_SCRATCH7_BASE_IDX 2 +#define mmDIO_MEM_PWR_STATUS 0x1edd +#define mmDIO_MEM_PWR_STATUS_BASE_IDX 2 +#define mmDIO_MEM_PWR_CTRL 0x1ede +#define mmDIO_MEM_PWR_CTRL_BASE_IDX 2 +#define mmDIO_MEM_PWR_CTRL2 0x1edf +#define mmDIO_MEM_PWR_CTRL2_BASE_IDX 2 +#define mmDIO_CLK_CNTL 0x1ee0 +#define mmDIO_CLK_CNTL_BASE_IDX 2 +#define mmDIO_MEM_PWR_CTRL3 0x1ee1 +#define mmDIO_MEM_PWR_CTRL3_BASE_IDX 2 +#define mmDIO_POWER_MANAGEMENT_CNTL 0x1ee4 +#define mmDIO_POWER_MANAGEMENT_CNTL_BASE_IDX 2 +#define mmDIG_SOFT_RESET 0x1eee +#define mmDIG_SOFT_RESET_BASE_IDX 2 +#define mmDIO_MEM_PWR_STATUS1 0x1ef0 +#define mmDIO_MEM_PWR_STATUS1_BASE_IDX 2 +#define mmDIO_CLK_CNTL2 0x1ef2 +#define mmDIO_CLK_CNTL2_BASE_IDX 2 +#define mmDIO_CLK_CNTL3 0x1ef3 +#define mmDIO_CLK_CNTL3_BASE_IDX 2 +#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL 0x1eff +#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL_BASE_IDX 2 +#define mmDIO_GENERIC_INTERRUPT_MESSAGE 0x1f02 +#define mmDIO_GENERIC_INTERRUPT_MESSAGE_BASE_IDX 2 +#define mmDIO_GENERIC_INTERRUPT_CLEAR 0x1f03 +#define mmDIO_GENERIC_INTERRUPT_CLEAR_BASE_IDX 2 + + +// addressBlock: dce_dc_dio_hpd0_dispdec +// base address: 0x0 +#define mmHPD0_DC_HPD_INT_STATUS 0x1f14 +#define mmHPD0_DC_HPD_INT_STATUS_BASE_IDX 2 +#define mmHPD0_DC_HPD_INT_CONTROL 0x1f15 
+#define mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_CONTROL 0x1f16
+#define mmHPD0_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL 0x1f17
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x1f18
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+// base address: 0x20
+#define mmHPD1_DC_HPD_INT_STATUS 0x1f1c
+#define mmHPD1_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD1_DC_HPD_INT_CONTROL 0x1f1d
+#define mmHPD1_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_CONTROL 0x1f1e
+#define mmHPD1_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL 0x1f1f
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x1f20
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+// base address: 0x0
+#define mmDP_AUX0_AUX_CONTROL 0x1f50
+#define mmDP_AUX0_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_CONTROL 0x1f51
+#define mmDP_AUX0_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_ARB_CONTROL 0x1f52
+#define mmDP_AUX0_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL 0x1f53
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_STATUS 0x1f54
+#define mmDP_AUX0_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_STATUS 0x1f55
+#define mmDP_AUX0_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_DATA 0x1f56
+#define mmDP_AUX0_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_DATA 0x1f57
+#define mmDP_AUX0_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1f58
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL 0x1f59
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0 0x1f5a
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1 0x1f5b
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS 0x1f5c
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS 0x1f5d
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+// base address: 0x70
+#define mmDP_AUX1_AUX_CONTROL 0x1f6c
+#define mmDP_AUX1_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_CONTROL 0x1f6d
+#define mmDP_AUX1_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_ARB_CONTROL 0x1f6e
+#define mmDP_AUX1_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL 0x1f6f
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_STATUS 0x1f70
+#define mmDP_AUX1_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_STATUS 0x1f71
+#define mmDP_AUX1_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_DATA 0x1f72
+#define mmDP_AUX1_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_DATA 0x1f73
+#define mmDP_AUX1_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x1f74
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL 0x1f75
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0 0x1f76
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1 0x1f77
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS 0x1f78
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS 0x1f79
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+// base address: 0x0
+#define mmDIG0_DIG_FE_CNTL 0x2068
+#define mmDIG0_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL 0x2069
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT 0x206a
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_DIG_CLOCK_PATTERN 0x206b
+#define mmDIG0_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_TEST_PATTERN 0x206c
+#define mmDIG0_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED 0x206d
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG0_DIG_FIFO_STATUS 0x206e
+#define mmDIG0_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL 0x206f
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4 0x2070
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG0_HDMI_CONTROL 0x2071
+#define mmDIG0_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_STATUS 0x2072
+#define mmDIG0_HDMI_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL 0x2073
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL 0x2074
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL 0x2075
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0 0x2076
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1 0x2077
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x2078
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_INTERRUPT_STATUS 0x2079
+#define mmDIG0_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_GC 0x207b
+#define mmDIG0_HDMI_GC_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2 0x207c
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_0 0x207d
+#define mmDIG0_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_1 0x207e
+#define mmDIG0_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_2 0x207f
+#define mmDIG0_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_3 0x2080
+#define mmDIG0_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_4 0x2081
+#define mmDIG0_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_0 0x2082
+#define mmDIG0_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_1 0x2083
+#define mmDIG0_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_2 0x2084
+#define mmDIG0_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_3 0x2085
+#define mmDIG0_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2 0x2086
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3 0x2087
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG0_HDMI_DB_CONTROL 0x2088
+#define mmDIG0_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG0_DME_CONTROL 0x2089
+#define mmDIG0_DME_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO0 0x208a
+#define mmDIG0_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO1 0x208b
+#define mmDIG0_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_HDR 0x208c
+#define mmDIG0_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_0 0x208d
+#define mmDIG0_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_1 0x208e
+#define mmDIG0_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_2 0x208f
+#define mmDIG0_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_3 0x2090
+#define mmDIG0_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_4 0x2091
+#define mmDIG0_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_5 0x2092
+#define mmDIG0_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_6 0x2093
+#define mmDIG0_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_7 0x2094
+#define mmDIG0_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x2095
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_0 0x2096
+#define mmDIG0_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_1 0x2097
+#define mmDIG0_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_0 0x2098
+#define mmDIG0_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_1 0x2099
+#define mmDIG0_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_0 0x209a
+#define mmDIG0_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_1 0x209b
+#define mmDIG0_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_0 0x209c
+#define mmDIG0_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_1 0x209d
+#define mmDIG0_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO0 0x209e
+#define mmDIG0_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO1 0x209f
+#define mmDIG0_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_60958_0 0x20a0
+#define mmDIG0_AFMT_60958_0_BASE_IDX 2
+#define mmDIG0_AFMT_60958_1 0x20a1
+#define mmDIG0_AFMT_60958_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL 0x20a2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL0 0x20a3
+#define mmDIG0_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL1 0x20a4
+#define mmDIG0_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL2 0x20a5
+#define mmDIG0_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL3 0x20a6
+#define mmDIG0_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG0_AFMT_60958_2 0x20a7
+#define mmDIG0_AFMT_60958_2_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT 0x20a8
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_AFMT_STATUS 0x20a9
+#define mmDIG0_AFMT_STATUS_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL 0x20aa
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL 0x20ab
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0 0x20ac
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL 0x20ad
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG0_DIG_BE_CNTL 0x20af
+#define mmDIG0_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_BE_EN_CNTL 0x20b0
+#define mmDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CNTL 0x20d3
+#define mmDIG0_TMDS_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL_CHAR 0x20d4
+#define mmDIG0_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK 0x20d5
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL 0x20d6
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x20d7
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x20d8
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG0_TMDS_CTL_BITS 0x20da
+#define mmDIG0_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG0_TMDS_DCBALANCER_CONTROL 0x20db
+#define mmDIG0_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR 0x20dc
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL 0x20dd
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL 0x20de
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_VERSION 0x20e0
+#define mmDIG0_DIG_VERSION_BASE_IDX 2
+#define mmDIG0_DIG_LANE_ENABLE 0x20e1
+#define mmDIG0_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG0_AFMT_CNTL 0x20e6
+#define mmDIG0_AFMT_CNTL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1 0x20e7
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5 0x20f6
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+// base address: 0x0
+#define mmDP0_DP_LINK_CNTL 0x2108
+#define mmDP0_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP0_DP_PIXEL_FORMAT 0x2109
+#define mmDP0_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP0_DP_MSA_COLORIMETRY 0x210a
+#define mmDP0_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP0_DP_CONFIG 0x210b
+#define mmDP0_DP_CONFIG_BASE_IDX 2
+#define mmDP0_DP_VID_STREAM_CNTL 0x210c
+#define mmDP0_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_STEER_FIFO 0x210d
+#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP0_DP_MSA_MISC 0x210e
+#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_VID_TIMING 0x2110
+#define mmDP0_DP_VID_TIMING_BASE_IDX 2
+#define mmDP0_DP_VID_N 0x2111
+#define mmDP0_DP_VID_N_BASE_IDX 2
+#define mmDP0_DP_VID_M 0x2112
+#define mmDP0_DP_VID_M_BASE_IDX 2
+#define mmDP0_DP_LINK_FRAMING_CNTL 0x2113
+#define mmDP0_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP0_DP_HBR2_EYE_PATTERN 0x2114
+#define mmDP0_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP0_DP_VID_MSA_VBID 0x2115
+#define mmDP0_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP0_DP_VID_INTERRUPT_CNTL 0x2116
+#define mmDP0_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CNTL 0x2117
+#define mmDP0_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x2118
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM0 0x2119
+#define mmDP0_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM1 0x211a
+#define mmDP0_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM2 0x211b
+#define mmDP0_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP0_DP_DPHY_8B10B_CNTL 0x211c
+#define mmDP0_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_PRBS_CNTL 0x211d
+#define mmDP0_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x211e
+#define mmDP0_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_EN 0x211f
+#define mmDP0_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_CNTL 0x2120
+#define mmDP0_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_RESULT 0x2121
+#define mmDP0_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_CNTL 0x2122
+#define mmDP0_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_STATUS 0x2123
+#define mmDP0_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING 0x2124
+#define mmDP0_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS 0x2125
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL 0x212b
+#define mmDP0_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL1 0x212c
+#define mmDP0_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING1 0x212d
+#define mmDP0_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING2 0x212e
+#define mmDP0_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING3 0x212f
+#define mmDP0_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING4 0x2130
+#define mmDP0_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N 0x2131
+#define mmDP0_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N_READBACK 0x2132
+#define mmDP0_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M 0x2133
+#define mmDP0_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M_READBACK 0x2134
+#define mmDP0_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_TIMESTAMP 0x2135
+#define mmDP0_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP0_DP_SEC_PACKET_CNTL 0x2136
+#define mmDP0_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_CNTL 0x2137
+#define mmDP0_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_UPDATE 0x2139
+#define mmDP0_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0 0x213a
+#define mmDP0_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1 0x213b
+#define mmDP0_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2 0x213c
+#define mmDP0_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT_UPDATE 0x213d
+#define mmDP0_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_LINK_TIMING 0x213e
+#define mmDP0_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP0_DP_MSE_MISC_CNTL 0x213f
+#define mmDP0_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x2144
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x2145
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0_STATUS 0x2147
+#define mmDP0_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1_STATUS 0x2148
+#define mmDP0_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2_STATUS 0x2149
+#define mmDP0_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM1 0x214c
+#define mmDP0_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM2 0x214d
+#define mmDP0_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM3 0x214e
+#define mmDP0_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM4 0x214f
+#define mmDP0_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP0_DP_DSC_CNTL 0x2152
+#define mmDP0_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL2 0x2153
+#define mmDP0_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL3 0x2154
+#define mmDP0_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL4 0x2155
+#define mmDP0_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL5 0x2156
+#define mmDP0_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL6 0x2157
+#define mmDP0_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL7 0x2158
+#define mmDP0_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP0_DP_DB_CNTL 0x2159
+#define mmDP0_DP_DB_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSA_VBID_MISC 0x215a
+#define mmDP0_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION 0x215b
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL 0x215c
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+// base address: 0x400
+#define mmDIG1_DIG_FE_CNTL 0x2168
+#define mmDIG1_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL 0x2169
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT 0x216a
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_DIG_CLOCK_PATTERN 0x216b
+#define mmDIG1_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_TEST_PATTERN 0x216c
+#define mmDIG1_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED 0x216d
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG1_DIG_FIFO_STATUS 0x216e
+#define mmDIG1_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL 0x216f
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4 0x2170
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG1_HDMI_CONTROL 0x2171
+#define mmDIG1_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_STATUS 0x2172
+#define mmDIG1_HDMI_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL 0x2173
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL 0x2174
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL 0x2175
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0 0x2176
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1 0x2177
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x2178
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_INTERRUPT_STATUS 0x2179
+#define mmDIG1_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_GC 0x217b
+#define mmDIG1_HDMI_GC_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2 0x217c
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_0 0x217d
+#define mmDIG1_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_1 0x217e
+#define mmDIG1_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_2 0x217f
+#define mmDIG1_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_3 0x2180
+#define mmDIG1_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_4 0x2181
+#define mmDIG1_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_0 0x2182
+#define mmDIG1_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_1 0x2183
+#define mmDIG1_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_2 0x2184
+#define mmDIG1_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_3 0x2185
+#define mmDIG1_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2 0x2186
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3 0x2187
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG1_HDMI_DB_CONTROL 0x2188
+#define mmDIG1_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG1_DME_CONTROL 0x2189
+#define mmDIG1_DME_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO0 0x218a
+#define mmDIG1_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO1 0x218b
+#define mmDIG1_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_HDR 0x218c
+#define mmDIG1_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_0 0x218d
+#define mmDIG1_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_1 0x218e
+#define mmDIG1_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_2 0x218f
+#define mmDIG1_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_3 0x2190
+#define mmDIG1_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_4 0x2191
+#define mmDIG1_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_5 0x2192
+#define mmDIG1_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_6 0x2193
+#define mmDIG1_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_7 0x2194
+#define mmDIG1_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x2195
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_0 0x2196
+#define mmDIG1_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_1 0x2197
+#define mmDIG1_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_0 0x2198
+#define mmDIG1_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_1 0x2199
+#define mmDIG1_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_0 0x219a
+#define mmDIG1_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_1 0x219b
+#define mmDIG1_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_0 0x219c
+#define mmDIG1_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_1 0x219d
+#define mmDIG1_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO0 0x219e
+#define mmDIG1_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO1 0x219f
+#define mmDIG1_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_60958_0 0x21a0
+#define mmDIG1_AFMT_60958_0_BASE_IDX 2
+#define mmDIG1_AFMT_60958_1 0x21a1
+#define mmDIG1_AFMT_60958_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL 0x21a2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL0 0x21a3
+#define mmDIG1_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL1 0x21a4
+#define mmDIG1_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL2 0x21a5
+#define mmDIG1_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL3 0x21a6
+#define mmDIG1_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG1_AFMT_60958_2 0x21a7
+#define mmDIG1_AFMT_60958_2_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT 0x21a8
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_AFMT_STATUS 0x21a9
+#define mmDIG1_AFMT_STATUS_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL 0x21aa
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL 0x21ab
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0 0x21ac
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL 0x21ad
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG1_DIG_BE_CNTL 0x21af
+#define mmDIG1_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_BE_EN_CNTL 0x21b0
+#define mmDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CNTL 0x21d3
+#define mmDIG1_TMDS_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL_CHAR 0x21d4
+#define mmDIG1_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK 0x21d5
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL 0x21d6
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x21d7
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x21d8
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG1_TMDS_CTL_BITS 0x21da
+#define mmDIG1_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG1_TMDS_DCBALANCER_CONTROL 0x21db
+#define mmDIG1_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR 0x21dc
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL 0x21dd
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL 0x21de
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_VERSION 0x21e0
+#define mmDIG1_DIG_VERSION_BASE_IDX 2
+#define mmDIG1_DIG_LANE_ENABLE 0x21e1
+#define mmDIG1_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG1_AFMT_CNTL 0x21e6
+#define mmDIG1_AFMT_CNTL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1 0x21e7
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5 0x21f6
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+// base address: 0x400
+#define mmDP1_DP_LINK_CNTL 0x2208
+#define mmDP1_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP1_DP_PIXEL_FORMAT 0x2209
+#define mmDP1_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP1_DP_MSA_COLORIMETRY 0x220a
+#define mmDP1_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP1_DP_CONFIG 0x220b
+#define mmDP1_DP_CONFIG_BASE_IDX 2
+#define mmDP1_DP_VID_STREAM_CNTL 0x220c
+#define mmDP1_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_STEER_FIFO 0x220d
+#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP1_DP_MSA_MISC 0x220e
+#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_VID_TIMING 0x2210
+#define mmDP1_DP_VID_TIMING_BASE_IDX 2
+#define mmDP1_DP_VID_N 0x2211
+#define mmDP1_DP_VID_N_BASE_IDX 2
+#define mmDP1_DP_VID_M 0x2212
+#define mmDP1_DP_VID_M_BASE_IDX 2
+#define mmDP1_DP_LINK_FRAMING_CNTL 0x2213
+#define mmDP1_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP1_DP_HBR2_EYE_PATTERN 0x2214
+#define mmDP1_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP1_DP_VID_MSA_VBID 0x2215
+#define mmDP1_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP1_DP_VID_INTERRUPT_CNTL 0x2216
+#define mmDP1_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CNTL 0x2217
+#define mmDP1_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x2218
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM0 0x2219
+#define mmDP1_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM1 0x221a
+#define mmDP1_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM2 0x221b
+#define mmDP1_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP1_DP_DPHY_8B10B_CNTL 0x221c
+#define mmDP1_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_PRBS_CNTL 0x221d
+#define mmDP1_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x221e
+#define mmDP1_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_EN 0x221f
+#define mmDP1_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_CNTL 0x2220
+#define mmDP1_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_RESULT 0x2221
+#define mmDP1_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_CNTL 0x2222
+#define mmDP1_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_STATUS 0x2223
+#define mmDP1_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING 0x2224
+#define mmDP1_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS 0x2225
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL 0x222b
+#define mmDP1_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL1 0x222c
+#define mmDP1_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING1 0x222d
+#define mmDP1_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING2 0x222e
+#define mmDP1_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING3 0x222f
+#define mmDP1_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING4 0x2230
+#define mmDP1_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N 0x2231
+#define mmDP1_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N_READBACK 0x2232
+#define mmDP1_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M 0x2233
+#define mmDP1_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M_READBACK 0x2234
+#define mmDP1_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_TIMESTAMP 0x2235
+#define mmDP1_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP1_DP_SEC_PACKET_CNTL 0x2236
+#define mmDP1_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_CNTL 0x2237
+#define mmDP1_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_UPDATE 0x2239
+#define mmDP1_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT0 0x223a
+#define mmDP1_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT1 0x223b
+#define mmDP1_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT2 0x223c
+#define mmDP1_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT_UPDATE 0x223d
+#define mmDP1_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_LINK_TIMING 0x223e +#define mmDP1_DP_MSE_LINK_TIMING_BASE_IDX 2 +#define mmDP1_DP_MSE_MISC_CNTL 0x223f +#define mmDP1_DP_MSE_MISC_CNTL_BASE_IDX 2 +#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x2244 +#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2 +#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x2245 +#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2 +#define mmDP1_DP_MSE_SAT0_STATUS 0x2247 +#define mmDP1_DP_MSE_SAT0_STATUS_BASE_IDX 2 +#define mmDP1_DP_MSE_SAT1_STATUS 0x2248 +#define mmDP1_DP_MSE_SAT1_STATUS_BASE_IDX 2 +#define mmDP1_DP_MSE_SAT2_STATUS 0x2249 +#define mmDP1_DP_MSE_SAT2_STATUS_BASE_IDX 2 +#define mmDP1_DP_MSA_TIMING_PARAM1 0x224c +#define mmDP1_DP_MSA_TIMING_PARAM1_BASE_IDX 2 +#define mmDP1_DP_MSA_TIMING_PARAM2 0x224d +#define mmDP1_DP_MSA_TIMING_PARAM2_BASE_IDX 2 +#define mmDP1_DP_MSA_TIMING_PARAM3 0x224e +#define mmDP1_DP_MSA_TIMING_PARAM3_BASE_IDX 2 +#define mmDP1_DP_MSA_TIMING_PARAM4 0x224f +#define mmDP1_DP_MSA_TIMING_PARAM4_BASE_IDX 2 +#define mmDP1_DP_DSC_CNTL 0x2252 +#define mmDP1_DP_DSC_CNTL_BASE_IDX 2 +#define mmDP1_DP_SEC_CNTL2 0x2253 +#define mmDP1_DP_SEC_CNTL2_BASE_IDX 2 +#define mmDP1_DP_SEC_CNTL3 0x2254 +#define mmDP1_DP_SEC_CNTL3_BASE_IDX 2 +#define mmDP1_DP_SEC_CNTL4 0x2255 +#define mmDP1_DP_SEC_CNTL4_BASE_IDX 2 +#define mmDP1_DP_SEC_CNTL5 0x2256 +#define mmDP1_DP_SEC_CNTL5_BASE_IDX 2 +#define mmDP1_DP_SEC_CNTL6 0x2257 +#define mmDP1_DP_SEC_CNTL6_BASE_IDX 2 +#define mmDP1_DP_SEC_CNTL7 0x2258 +#define mmDP1_DP_SEC_CNTL7_BASE_IDX 2 +#define mmDP1_DP_DB_CNTL 0x2259 +#define mmDP1_DP_DB_CNTL_BASE_IDX 2 +#define mmDP1_DP_MSA_VBID_MISC 0x225a +#define mmDP1_DP_MSA_VBID_MISC_BASE_IDX 2 +#define mmDP1_DP_SEC_METADATA_TRANSMISSION 0x225b +#define mmDP1_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2 +#define mmDP1_DP_DSC_BYTES_PER_PIXEL 0x225c +#define mmDP1_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2 + + +// addressBlock: dce_dc_dcio_dcio_dispdec +// base address: 0x0 +#define mmDC_GENERICA 0x2868 +#define mmDC_GENERICA_BASE_IDX 2 +#define mmUNIPHYA_LINK_CNTL 0x286d +#define mmUNIPHYA_LINK_CNTL_BASE_IDX 2 +#define mmUNIPHYA_CHANNEL_XBAR_CNTL 0x286e +#define mmUNIPHYA_CHANNEL_XBAR_CNTL_BASE_IDX 2 +#define mmUNIPHYB_LINK_CNTL 0x286f +#define mmUNIPHYB_LINK_CNTL_BASE_IDX 2 +#define mmUNIPHYB_CHANNEL_XBAR_CNTL 0x2870 +#define mmUNIPHYB_CHANNEL_XBAR_CNTL_BASE_IDX 2 +#define mmDCIO_WRCMD_DELAY 0x287e +#define mmDCIO_WRCMD_DELAY_BASE_IDX 2 +#define mmDC_PINSTRAPS 0x2880 +#define mmDC_PINSTRAPS_BASE_IDX 2 +#define mmDCIO_CLOCK_CNTL 0x2895 +#define mmDCIO_CLOCK_CNTL_BASE_IDX 2 +#define mmDCIO_SOFT_RESET 0x289e +#define mmDCIO_SOFT_RESET_BASE_IDX 2 + + +// addressBlock: dce_dc_dcio_dcio_chip_dispdec +// base address: 0x0 +#define mmDC_GPIO_DDC1_MASK 0x28d0 +#define mmDC_GPIO_DDC1_MASK_BASE_IDX 2 +#define mmDC_GPIO_DDC1_A 0x28d1 +#define mmDC_GPIO_DDC1_A_BASE_IDX 2 +#define mmDC_GPIO_DDC1_EN 0x28d2 +#define mmDC_GPIO_DDC1_EN_BASE_IDX 2 +#define mmDC_GPIO_DDC1_Y 0x28d3 +#define mmDC_GPIO_DDC1_Y_BASE_IDX 2 +#define mmDC_GPIO_DDC2_MASK 0x28d4 +#define mmDC_GPIO_DDC2_MASK_BASE_IDX 2 +#define mmDC_GPIO_DDC2_A 0x28d5 +#define mmDC_GPIO_DDC2_A_BASE_IDX 2 +#define mmDC_GPIO_DDC2_EN 0x28d6 +#define mmDC_GPIO_DDC2_EN_BASE_IDX 2 +#define mmDC_GPIO_DDC2_Y 0x28d7 +#define mmDC_GPIO_DDC2_Y_BASE_IDX 2 +#define mmDC_GPIO_HPD_MASK 0x28f4 +#define mmDC_GPIO_HPD_MASK_BASE_IDX 2 +#define mmDC_GPIO_HPD_A 0x28f5 +#define mmDC_GPIO_HPD_A_BASE_IDX 2 +#define mmDC_GPIO_HPD_EN 0x28f6 +#define mmDC_GPIO_HPD_EN_BASE_IDX 2 +#define mmDC_GPIO_HPD_Y 0x28f7 +#define mmDC_GPIO_HPD_Y_BASE_IDX 2 +#define 
mmDC_GPIO_PAD_STRENGTH_1 0x28fc +#define mmDC_GPIO_PAD_STRENGTH_1_BASE_IDX 2 +#define mmPHY_AUX_CNTL 0x28ff +#define mmPHY_AUX_CNTL_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_1 0x2917 +#define mmDC_GPIO_AUX_CTRL_1_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_2 0x2918 +#define mmDC_GPIO_AUX_CTRL_2_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_3 0x291b +#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_4 0x291c +#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_5 0x291d +#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX 2 +#define mmAUXI2C_PAD_ALL_PWR_OK 0x291e +#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2 + +// addressBlock: azf0endpoint0_endpointind +// base address: 0x0 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a +#define 
ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058 +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059 +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060 +#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069 +#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a +#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b +#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c +#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d +#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e + + +// addressBlock: azf0endpoint1_endpointind +// base address: 0x0 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c +#define 
ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058 +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059 +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f +#define 
ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060 +#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069 +#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a +#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b +#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c +#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d +#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e + + +// addressBlock: azf0inputendpoint0_inputendpointind +// base address: 0x0 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067 +#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068 + + +// addressBlock: azf0inputendpoint1_inputendpointind +// base address: 0x0 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001 +#define 
ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067 +#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068 + + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h new file mode 100755 index 000000000000..91969554e36a --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h @@ -0,0 +1,22091 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _dcn_2_0_3_SH_MASK_HEADER +#define _dcn_2_0_3_SH_MASK_HEADER + + +// addressBlock: dce_dc_dccg_dccg_dispdec +//PHYPLLA_PIXCLK_RESYNC_CNTL +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L +//PHYPLLB_PIXCLK_RESYNC_CNTL +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L +//DP_DTO_DBUF_EN +#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN__SHIFT 0x0 +#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN__SHIFT 0x1 +#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN_MASK 0x00000001L +#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN_MASK 0x00000002L +//DPREFCLK_CGTT_BLK_CTRL_REG +#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0 +#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4 +#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0x0000000FL +#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L +//REFCLK_CNTL +#define REFCLK_CNTL__REFCLK_CLOCK_EN__SHIFT 0x0 +#define REFCLK_CNTL__REFCLK_SRC_SEL__SHIFT 0x1 +#define REFCLK_CNTL__REFCLK_CLOCK_EN_MASK 0x00000001L +#define REFCLK_CNTL__REFCLK_SRC_SEL_MASK 0x00000002L +//DCCG_DS_DTO_INCR +#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0 +#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL +//DCCG_DS_DTO_MODULO +#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0 +#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xFFFFFFFFL +//DCCG_DS_CNTL +#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0 +#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4 +#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8 +#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9 +#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10 +#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18 +#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19 +#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x00000001L +#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x00000030L +#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x00000100L +#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x00000200L +#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x00030000L +#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x01000000L +#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x02000000L +//DCCG_DS_HW_CAL_INTERVAL +#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0 +#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xFFFFFFFFL +//DPREFCLK_CNTL +#define 
DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0 +#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x00000007L +//DCE_VERSION +#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0 +#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8 +#define DCE_VERSION__MAJOR_VERSION_MASK 0x000000FFL +#define DCE_VERSION__MINOR_VERSION_MASK 0x0000FF00L +//DCCG_GTC_CNTL +#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0 +#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L +//DCCG_GTC_DTO_INCR +#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0 +#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xFFFFFFFFL +//DCCG_GTC_DTO_MODULO +#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0 +#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xFFFFFFFFL +//DCCG_GTC_CURRENT +#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0 +#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xFFFFFFFFL +//MILLISECOND_TIME_BASE_DIV +#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0 +#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14 +#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001FFFFL +#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L +//DISPCLK_FREQ_CHANGE_CNTL +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0 +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10 +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14 +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19 +#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c +#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d +#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003FFFL +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000F0000L +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0E000000L +#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L +#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L +#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L +#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L +//DC_MEM_GLOBAL_PWR_REQ_CNTL +#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0 +#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L +//DCCG_PERFMON_CNTL +#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0 +#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1 +#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2 +#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3 +#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4 +#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5 +#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6 +#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7 +#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL__SHIFT 0x8 +#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb +#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L +#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L +#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x00000004L +#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x00000008L 
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L +#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L +#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L +#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L +#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL_MASK 0x00000700L +#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xFFFFF800L +//DCCG_GATE_DISABLE_CNTL +#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0 +#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1 +#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE__SHIFT 0x2 +#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3 +#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4 +#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6 +#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8 +#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE__SHIFT 0x9 +#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE__SHIFT 0xa +#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE__SHIFT 0xb +#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE__SHIFT 0xc +#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11 +#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12 +#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13 +#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE__SHIFT 0x15 +#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16 +#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a +#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b +#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c +#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d +#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e +#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L +#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L +#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE_MASK 0x00000004L +#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x00000008L +#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L +#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L +#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x00000100L +#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE_MASK 0x00000200L +#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000400L +#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE_MASK 0x00000800L +#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE_MASK 0x00001000L +#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x00020000L +#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x00040000L +#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x00080000L +#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE_MASK 0x00200000L +#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x00400000L +#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x04000000L +#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x08000000L +#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000L +#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000L +#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000L +//DISPCLK_CGTT_BLK_CTRL_REG +#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0 +#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4 +#define 
DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL +#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L +//SOCCLK_CGTT_BLK_CTRL_REG +#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY__SHIFT 0x0 +#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY__SHIFT 0x4 +#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL +#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L +//DCCG_CAC_STATUS +#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0 +#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xFFFFFFFFL +//MICROSECOND_TIME_BASE_DIV +#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0 +#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8 +#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10 +#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11 +#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14 +#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL +#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007F00L +#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L +#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L +#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L +//DCCG_GATE_DISABLE_CNTL2 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16 +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x00000001L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x00000002L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x00000004L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x00000008L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x00000010L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x00000020L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x00000040L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x00010000L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x00020000L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x00040000L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x00080000L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x00100000L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x00200000L +#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x00400000L +//SYMCLK_CGTT_BLK_CTRL_REG +#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0 +#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4 +#define 
SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0x0000000FL +#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0x00000FF0L +//DCCG_DISP_CNTL_REG +#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8 +#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x00000100L +//OTG0_PIXEL_RATE_CNTL +#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE__SHIFT 0x0 +#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4 +#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5 +#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT 0x8 +#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL__SHIFT 0x9 +#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN__SHIFT 0xb +#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR__SHIFT 0xe +#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT__SHIFT 0x10 +#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE_MASK 0x00000003L +#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L +#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x00000020L +#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL_MASK 0x00000100L +#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL_MASK 0x00000200L +#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN_MASK 0x00000800L +#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR_MASK 0x0000C000L +#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT_MASK 0x0FFF0000L +//DP_DTO0_PHASE +#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0 +#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xFFFFFFFFL +//DP_DTO0_MODULO +#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0 +#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xFFFFFFFFL +//OTG0_PHYPLL_PIXEL_RATE_CNTL +#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0 +#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4 +#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L +#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L +//OTG1_PIXEL_RATE_CNTL +#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE__SHIFT 0x0 +#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4 +#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5 +#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL__SHIFT 0x8 +#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL__SHIFT 0x9 +#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN__SHIFT 0xb +#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR__SHIFT 0xe +#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT__SHIFT 0x10 +#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE_MASK 0x00000003L +#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L +#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x00000020L +#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL_MASK 0x00000100L +#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL_MASK 0x00000200L +#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN_MASK 0x00000800L +#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR_MASK 0x0000C000L +#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT_MASK 0x0FFF0000L +//DP_DTO1_PHASE +#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0 +#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xFFFFFFFFL +//DP_DTO1_MODULO +#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0 +#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xFFFFFFFFL +//OTG1_PHYPLL_PIXEL_RATE_CNTL +#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0 +#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4 +#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L +#define 
OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L +//DPPCLK_CGTT_BLK_CTRL_REG +#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY__SHIFT 0x0 +#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY__SHIFT 0x4 +#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY_MASK 0x0000000FL +#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L +//DPPCLK0_DTO_PARAM +#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE__SHIFT 0x0 +#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO__SHIFT 0x10 +#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE_MASK 0x000000FFL +#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO_MASK 0x00FF0000L +//DPPCLK1_DTO_PARAM +#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE__SHIFT 0x0 +#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO__SHIFT 0x10 +#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE_MASK 0x000000FFL +#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO_MASK 0x00FF0000L +//DPPCLK2_DTO_PARAM +#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE__SHIFT 0x0 +#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO__SHIFT 0x10 +#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE_MASK 0x000000FFL +#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO_MASK 0x00FF0000L +//DPPCLK3_DTO_PARAM +#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE__SHIFT 0x0 +#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO__SHIFT 0x10 +#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE_MASK 0x000000FFL +#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO_MASK 0x00FF0000L +//DCCG_CAC_STATUS2 +#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2__SHIFT 0x0 +#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2_MASK 0x0000000FL +//SYMCLKA_CLOCK_ENABLE +#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0 +#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x4 +#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x8 +#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L +#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x00000010L +#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x00000700L +//SYMCLKB_CLOCK_ENABLE +#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0 +#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x4 +#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x8 +#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L +#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x00000010L +#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x00000700L +//DCCG_AUDIO_DTO_SOURCE +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0 +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4 +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL__SHIFT 0xc +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN__SHIFT 0x10 +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14 +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18 +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000030L +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL_MASK 0x00003000L +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN_MASK 0x00010000L +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x00100000L +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x01000000L +#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000L +//DCCG_AUDIO_DTO0_PHASE +#define 
DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0 +#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xFFFFFFFFL +//DCCG_AUDIO_DTO0_MODULE +#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0 +#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xFFFFFFFFL +//DCCG_AUDIO_DTO1_PHASE +#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0 +#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xFFFFFFFFL +//DCCG_AUDIO_DTO1_MODULE +#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0 +#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xFFFFFFFFL +//DCCG_VSYNC_OTG0_LATCH_VALUE +#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE__SHIFT 0x0 +#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE_MASK 0xFFFFFFFFL +//DCCG_VSYNC_OTG1_LATCH_VALUE +#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE__SHIFT 0x0 +#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE_MASK 0xFFFFFFFFL +//DPPCLK_DTO_CTRL +#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE__SHIFT 0x0 +#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN__SHIFT 0x1 +#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE__SHIFT 0x4 +#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN__SHIFT 0x5 +#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE__SHIFT 0x8 +#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN__SHIFT 0x9 +#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE__SHIFT 0xc +#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN__SHIFT 0xd +#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE_MASK 0x00000001L +#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN_MASK 0x00000002L +#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE_MASK 0x00000010L +#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN_MASK 0x00000020L +#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE_MASK 0x00000100L +#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN_MASK 0x00000200L +#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE_MASK 0x00001000L +#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN_MASK 0x00002000L +//DCCG_VSYNC_CNT_CTRL +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE__SHIFT 0x0 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL__SHIFT 0x1 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET__SHIFT 0x2 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL__SHIFT 0x3 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL__SHIFT 0x4 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN__SHIFT 0x10 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN__SHIFT 0x11 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL__SHIFT 0x18 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL__SHIFT 0x19 +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE_MASK 0x00000001L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL_MASK 0x00000002L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET_MASK 0x00000004L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL_MASK 0x00000008L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL_MASK 0x000000F0L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK 0x00000F00L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN_MASK 0x00010000L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN_MASK 0x00020000L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL_MASK 0x01000000L +#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL_MASK 0x02000000L +//DCCG_VSYNC_CNT_INT_CTRL +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT__SHIFT 0x0 +#define 
DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR__SHIFT 0x0 +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT__SHIFT 0x1 +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR__SHIFT 0x1 +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK__SHIFT 0x8 +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK__SHIFT 0x9 +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_MASK 0x00000001L +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR_MASK 0x00000001L +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_MASK 0x00000002L +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR_MASK 0x00000002L +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK_MASK 0x00000100L +#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK_MASK 0x00000200L + + +// addressBlock: dce_dc_dccg_dccg_dfs_dispdec +//DENTIST_DISPCLK_CNTL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG__SHIFT 0x15 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG__SHIFT 0x16 +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18 +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG_MASK 0x00200000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG_MASK 0x00400000L +#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L +//DISP_INTERRUPT_STATUS +#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1 +#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4 +#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5 +#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6 +#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT__SHIFT 0x7 +#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT__SHIFT 0x8 +#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9 +#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa +#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf +#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10 +#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11 +#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12 +#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13 +#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14 +#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT__SHIFT 0x16 +#define 
DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT_MASK 0x04000000L
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT_MASK 0x08000000L
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT_MASK 0x10000000L
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT_MASK 0x20000000L
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_MASK 0x40000000L
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000L
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+//AZ_CLOCK_CNTL
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x0
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x8
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x10
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x00000001L
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000100L
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x00010000L
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+//AZALIA_CONTROLLER_CLOCK_GATING
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define
AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L +#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L +//AZALIA_AUDIO_DTO +#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0 +#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10 +#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL +#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L +//AZALIA_AUDIO_DTO_CONTROL +#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8 +#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L +//AZALIA_SOCCLK_CONTROL +#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1 +#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L +//AZALIA_UNDERFLOW_FILLER_SAMPLE +#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0 +#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL +//AZALIA_DATA_DMA_CONTROL +#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0 +#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2 +#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4 +#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6 +#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10 +#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11 +#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L +#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL +#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L +#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L +#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L +#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L +//AZALIA_BDL_DMA_CONTROL +#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0 +#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2 +#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4 +#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6 +#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L +#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL +#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L +#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L +//AZALIA_RIRB_AND_DP_CONTROL +#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0 +#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4 +#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5 +#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L +#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L +#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L +//AZALIA_CORB_DMA_CONTROL +#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0 +#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4 + + +// addressBlock: dce_dc_hda_azf0root_dispdec +//AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID +#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0 +#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL +//AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID +#define 
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+//AZALIA_CONTROLLER_CLOCK_GATING
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L
+//AZALIA_AUDIO_DTO
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L
+//AZALIA_AUDIO_DTO_CONTROL
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+//AZALIA_SOCCLK_CONTROL
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L
+//AZALIA_UNDERFLOW_FILLER_SAMPLE
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL
+//AZALIA_DATA_DMA_CONTROL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+//AZALIA_BDL_DMA_CONTROL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L
+//AZALIA_RIRB_AND_DP_CONTROL
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L
+//AZALIA_CORB_DMA_CONTROL
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+//AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+//AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003FL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//CC_RCU_DC_AUDIO_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_sdpif_dispdec
+//DCHUBBUB_SDPIF_CFG0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS__SHIFT 0x3
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS__SHIFT 0x6
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR__SHIFT 0xa
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR__SHIFT 0xb
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR__SHIFT 0xc
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN__SHIFT 0xd
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN__SHIFT 0xe
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL__SHIFT 0xf
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY__SHIFT 0x19
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS_MASK 0x00000006L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_MASK 0x000003C0L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_MASK 0x00000400L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR_MASK 0x00000800L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY_MASK 0x7E000000L
+//DCHUBBUB_SDPIF_PIPE_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_MEM_PWR_CTRL
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_SDPIF_MEM_PWR_STATUS
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_SDPIF_CFG1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP_MASK 0x00000100L
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
+//DCHUBBUB_RET_PATH_DCC_CFG
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN_MASK 0x00000001L
+//DCHUBBUB_RET_PATH_DCC_CFG0_0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG0_1
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_1
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_1
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_1
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_1
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_1
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_1
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_1
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_MEM_PWR_CTRL
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_RET_PATH_MEM_PWR_STATUS
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE_MASK 0x00000003L
+
+// addressBlock: dce_dc_dchubbub_hubbub_dispdec
+//DCHUBBUB_ARB_DF_REQ_OUTSTAND
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND__SHIFT 0x0
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND__SHIFT 0x10
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND_MASK 0x000001FFL
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_MASK 0x01FF0000L
+//DCHUBBUB_ARB_SAT_LEVEL
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL__SHIFT 0x0
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL_MASK 0xFFFFFFFFL
+//DCHUBBUB_ARB_QOS_FORCE
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE__SHIFT 0x8
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE_MASK 0x0000000FL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE_MASK 0x00000100L
+//DCHUBBUB_ARB_DRAM_STATE_CNTL
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT 0x1
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE__SHIFT 0x4
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE__SHIFT 0x5
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL__SHIFT 0x9
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK 0x00000001L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK 0x00000002L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE_MASK 0x00000010L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE_MASK 0x00000020L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_MASK 0x00000100L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL_MASK 0x00000200L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_MASK 0x001FFFFFL
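All twelve watermark registers above hold a single 21-bit value (mask 0x001FFFFF), one register per watermark type per set A-D. Programming one therefore reduces to a masked write; a sketch with the same hypothetical accessor:

#include <stdint.h>

extern void write_reg(uint32_t offset, uint32_t value);	/* hypothetical */

static void program_urgency_watermark_a(uint32_t reg_offset, uint32_t wm)
{
	/* the field occupies bits 20:0; the mask keeps the value in range */
	write_reg(reg_offset,
		  wm & DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK);
}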
+//DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT__SHIFT 0x0
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE__SHIFT 0x4
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_ACK__SHIFT 0x6
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_MASK 0x00000003L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE_MASK 0x00000010L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_ACK_MASK 0x00000040L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST_MASK 0x00000100L
+//DCHUBBUB_ARB_TIMEOUT_ENABLE
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE_MASK 0x00000001L
+//DCHUBBUB_GLOBAL_TIMER_CNTL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV__SHIFT 0x0
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE__SHIFT 0xc
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT__SHIFT 0x10
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV_MASK 0x0000000FL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE_MASK 0x00001000L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT_MASK 0xFFFF0000L
+//SURFACE_CHECK0_ADDRESS_LSB
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK0_ADDRESS_MSB
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK1_ADDRESS_LSB
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK1_ADDRESS_MSB
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK2_ADDRESS_LSB
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK2_ADDRESS_MSB
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK3_ADDRESS_LSB
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK3_ADDRESS_MSB
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE_MASK 0x80000000L
+//VTG0_CONTROL
+#define VTG0_CONTROL__VTG0_FP2__SHIFT 0x0
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT__SHIFT 0x10
+#define VTG0_CONTROL__VTG0_ENABLE__SHIFT 0x1f
+#define VTG0_CONTROL__VTG0_FP2_MASK 0x00007FFFL
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG0_CONTROL__VTG0_ENABLE_MASK 0x80000000L
+//VTG1_CONTROL
+#define VTG1_CONTROL__VTG1_FP2__SHIFT 0x0
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT__SHIFT 0x10
+#define VTG1_CONTROL__VTG1_ENABLE__SHIFT 0x1f
+#define VTG1_CONTROL__VTG1_FP2_MASK 0x00007FFFL
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG1_CONTROL__VTG1_ENABLE_MASK 0x80000000L
+//DCHUBBUB_SOFT_RESET
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET__SHIFT 0x0
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET__SHIFT 0x1
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET__SHIFT 0x4
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET_MASK 0x00000001L
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET_MASK 0x00000002L
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET_MASK 0x00000010L
+//DCHUBBUB_CLOCK_CNTL
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x5
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x6
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000020L
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000040L
+//DCFCLK_CNTL
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS__SHIFT 0x1f
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS_MASK 0x80000000L
+//DCHUBBUB_VLINE_SNAPSHOT
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT__SHIFT 0x0
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT_MASK 0x00000001L
+//DCHUBBUB_CTRL_STATUS
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN__SHIFT 0x0
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN_MASK 0x00000001L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL1
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD__SHIFT 0x6
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS_MASK 0x0000003FL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD_MASK 0xFFFFFFC0L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL2
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN__SHIFT 0x1b
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET__SHIFT 0x1c
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD_MASK 0x07FFFFFFL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN_MASK 0x08000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET_MASK 0x10000000L
+//DCHUBBUB_TIMEOUT_INTERRUPT_STATUS
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS__SHIFT 0x1
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR__SHIFT 0x2
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK__SHIFT 0x3
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK 0x00000002L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK_MASK 0x000000F8L
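The timeout interrupt register above follows the usual enable/status/clear layout: bit 0 enables, bit 1 latches status, bit 2 is a clear field. A sketch of an acknowledge path (whether the clear bit self-clears is hardware behavior this header does not describe):

#include <stdint.h>

extern uint32_t read_reg(uint32_t offset);		/* hypothetical */
extern void write_reg(uint32_t offset, uint32_t value);	/* hypothetical */

static void ack_dchubbub_timeout(uint32_t reg_offset)
{
	uint32_t v = read_reg(reg_offset);

	if (v & DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK)
		write_reg(reg_offset,
			  v | DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK);
}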
+//DCHUBBUB_TEST_DEBUG_INDEX
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+//DCHUBBUB_TEST_DEBUG_DATA
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+//HUBP0_DCSURF_SURFACE_CONFIG
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP0_DCSURF_ADDR_CONFIG
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP0_DCSURF_TILING_CONFIG
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP0_DCSURF_PRI_VIEWPORT_START
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
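Each viewport register packs two 14-bit fields into one 32-bit word: X/width in bits 13:0 and Y/height in bits 29:16, so composing a register value is shift-then-mask for each field. A sketch:

#include <stdint.h>

static uint32_t pack_pri_viewport_dimension(uint32_t width, uint32_t height)
{
	/* width in bits 13:0, height in bits 29:16, per the masks above */
	return ((width << HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT) &
		HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK) |
	       ((height << HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT) &
		HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK);
}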
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP0_DCHUBP_CNTL
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP0_HUBP_CLK_CNTL
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP0_HUBPREQ_DEBUG_DB
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+//HUBPREQ0_DCSURF_SURFACE_PITCH
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
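Surface addresses are split across a full 32-bit low register and a 16-bit ADDRESS_HIGH register (mask 0x0000FFFF), i.e. 48 address bits overall. A sketch of splitting a 64-bit GPU address into the two register values:

#include <stdint.h>

/* split a surface address into its LOW/HIGH register values */
static void split_surface_address(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr;
	*hi = (uint32_t)(addr >> 32) &
	      HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK;
}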
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL2
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
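SURFACE_FLIP_PENDING (bit 8 of DCSURF_FLIP_CONTROL) is the status field a driver polls after arming a flip, to know when the new surface address has been latched and the old buffer can be reused. A sketch under the same hypothetical accessor:

#include <stdint.h>

extern uint32_t read_reg(uint32_t offset);	/* hypothetical MMIO read */

static int surface_flip_pending(uint32_t flip_control_offset)
{
	return !!(read_reg(flip_control_offset) &
		  HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK);
}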
+//HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCN_EXPANSION_MODE
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ0_DCN_TTU_QOS_WM
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ0_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_BLANK_OFFSET_0
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ0_BLANK_OFFSET_1
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ0_DST_DIMENSIONS
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ0_DST_AFTER_SCALER
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ0_PREFETCH_SETTINGS
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ0_PREFETCH_SETTINGS_C
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ0_VBLANK_PARAMETERS_1
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_2
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_3
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_4
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ0_FLIP_PARAMETERS_2
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_4
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_5
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL +//HUBPREQ0_NOM_PARAMETERS_6 +#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0 +#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL +//HUBPREQ0_NOM_PARAMETERS_7 +#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0 +#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL +//HUBPREQ0_PER_LINE_DELIVERY_PRE +#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0 +#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10 +#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL +#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L +//HUBPREQ0_PER_LINE_DELIVERY +#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0 +#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10 +#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL +#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L +//HUBPREQ0_CURSOR_SETTINGS +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0 +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8 +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10 +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18 +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L +#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L +//HUBPREQ0_REF_FREQ_TO_PIX_FREQ +#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0 +#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL +//HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT +#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0 +#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL +//HUBPREQ0_HUBPREQ_MEM_PWR_CTRL +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0 +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2 +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4 +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6 +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8 +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L +#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L +//HUBPREQ0_HUBPREQ_MEM_PWR_STATUS +#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0 +#define 
HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2 +#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4 +#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6 +#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L +#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL +#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L +#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L + + +// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec +//HUBPRET0_HUBPRET_CONTROL +#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0 +#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10 +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12 +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14 +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16 +#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18 +#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL +#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L +#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L +#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L +//HUBPRET0_HUBPRET_MEM_PWR_CTRL +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0 +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2 +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4 +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8 +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10 +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12 +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14 +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L +#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L +//HUBPRET0_HUBPRET_MEM_PWR_STATUS +#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0 +#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2 +#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4 +#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L +#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL +#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L +//HUBPRET0_HUBPRET_READ_LINE_CTRL0 +#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0 +#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10 +#define 
HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL +#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L +//HUBPRET0_HUBPRET_READ_LINE_CTRL1 +#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0 +#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10 +#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL +#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L +//HUBPRET0_HUBPRET_READ_LINE0 +#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0 +#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10 +#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL +#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L +//HUBPRET0_HUBPRET_READ_LINE1 +#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0 +#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10 +#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL +#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L +//HUBPRET0_HUBPRET_INTERRUPT +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12 +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L +#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L +//HUBPRET0_HUBPRET_READ_LINE_VALUE 
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0 +#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10 +#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL +#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L +//HUBPRET0_HUBPRET_READ_LINE_STATUS +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0 +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4 +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5 +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8 +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L +#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L + + +// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec +//CURSOR0_0_CURSOR_CONTROL +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0 +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8 +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10 +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14 +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18 +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L +#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L +//CURSOR0_0_CURSOR_SURFACE_ADDRESS +#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0 +#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH +#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//CURSOR0_0_CURSOR_SIZE +#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0 +#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10 +#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL +#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L +//CURSOR0_0_CURSOR_POSITION +#define 
CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0 +#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10 +#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL +#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L +//CURSOR0_0_CURSOR_HOT_SPOT +#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0 +#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10 +#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL +#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L +//CURSOR0_0_CURSOR_STEREO_CONTROL +#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0 +#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4 +#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12 +#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L +#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L +#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L +//CURSOR0_0_CURSOR_DST_OFFSET +#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0 +#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL +//CURSOR0_0_CURSOR_MEM_PWR_CTRL +#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0 +#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2 +#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4 +#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L +#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L +#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L +//CURSOR0_0_CURSOR_MEM_PWR_STATUS +#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0 +#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L +//CURSOR0_0_DMDATA_ADDRESS_HIGH +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0 +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L +#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L +//CURSOR0_0_DMDATA_ADDRESS_LOW +#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0 +#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL +//CURSOR0_0_DMDATA_CNTL +#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0 +#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1 +#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2 +#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10 +#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L +#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L +#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L +#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L +//CURSOR0_0_DMDATA_QOS_CNTL +#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0 +#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4 +#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10 +#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L +#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L +#define 
CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L +//CURSOR0_0_DMDATA_STATUS +#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0 +#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2 +#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4 +#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L +#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L +#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L +//CURSOR0_0_DMDATA_SW_CNTL +#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0 +#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1 +#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10 +#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L +#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L +#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L +//CURSOR0_0_DMDATA_SW_DATA +#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0 +#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL + + +// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec +//HUBP1_DCSURF_SURFACE_CONFIG +#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0 +#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8 +#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa +#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL +#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L +#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L +//HUBP1_DCSURF_ADDR_CONFIG +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3 +#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6 +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8 +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa +#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L +#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L +#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L +#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L +//HUBP1_DCSURF_TILING_CONFIG +#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0 +#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7 +#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9 +#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa +#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb +#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL +#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L +#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L +#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L +#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L +//HUBP1_DCSURF_PRI_VIEWPORT_START +#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0 +#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10 +#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL +#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L +//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION +#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0 +#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10 +#define 
HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL +#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L +//HUBP1_DCSURF_PRI_VIEWPORT_START_C +#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0 +#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10 +#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL +#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L +//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C +#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0 +#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10 +#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL +#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L +//HUBP1_DCSURF_SEC_VIEWPORT_START +#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0 +#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10 +#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL +#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L +//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0 +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10 +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L +//HUBP1_DCSURF_SEC_VIEWPORT_START_C +#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0 +#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10 +#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL +#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L +//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0 +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10 +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL +#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L +//HUBP1_DCHUBP_REQ_SIZE_CONFIG +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L +//HUBP1_DCHUBP_REQ_SIZE_CONFIG_C +#define 
HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18 +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L +#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L +//HUBP1_DCHUBP_CNTL +#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0 +#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1 +#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2 +#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3 +#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4 +#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8 +#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9 +#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc +#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd +#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10 +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14 +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18 +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b +#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c +#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f +#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L +#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L +#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L +#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L +#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L +#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L +#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L +#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L +#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L +#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L +#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L +#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L +#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L +//HUBP1_HUBP_CLK_CNTL +#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0 +#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4 +#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8 +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10 
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14 +#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15 +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16 +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17 +#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L +#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L +//HUBP1_HUBPREQ_DEBUG_DB +#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0 +#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL +//HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0 +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4 +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14 +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L +//HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0 +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1 +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4 +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14 +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L +#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L + + +// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec +//HUBPREQ1_DCSURF_SURFACE_PITCH +#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10 +#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL +#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L +//HUBPREQ1_DCSURF_SURFACE_PITCH_C +#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10 +#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL +#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL 
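Note how surface addresses wider than 32 bits are split across register pairs above: a full 32-bit LOW register plus a 16-bit HIGH field, for a 48-bit GPU address in total. A short sketch of programming such a pair from a 64-bit address, again with a hypothetical regWrite32() helper and placeholder offsets (only the mask names come from this header):

#include <stdint.h>

void regWrite32(uint32_t reg, uint32_t val); /* hypothetical MMIO write helper */

/* Placeholder offsets; the real ones are defined in the matching _offset.h header. */
#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS      0x0
#define regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x4

static void hubpreq1_set_primary_surface_address(uint64_t addr)
{
	/* Bits 31:0 fill the LOW register completely. */
	regWrite32(regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS,
		   (uint32_t)(addr & HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK));
	/* Bits 47:32 land in the 16-bit HIGH field; its shift is 0, so no shifting is needed. */
	regWrite32(regHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH,
		   (uint32_t)(addr >> 32) &
		   HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK);
}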
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_SURFACE_CONTROL +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12 +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13 + +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L +#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L +//HUBPREQ1_DCSURF_FLIP_CONTROL +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10 +#define 
HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14 +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L +#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L +//HUBPREQ1_DCSURF_FLIP_CONTROL2 +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0 +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8 +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9 +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L +#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L +//HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13 +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L +#define 
HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L +#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L +//HUBPREQ1_DCSURF_SURFACE_INUSE +#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH +#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_SURFACE_INUSE_C +#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C +#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ1_DCN_EXPANSION_MODE +#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0 +#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2 +#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4 +#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6 +#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L +#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL +#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L +#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L +//HUBPREQ1_DCN_TTU_QOS_WM +#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0 +#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10 +#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL +#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L +//HUBPREQ1_DCN_GLOBAL_TTU_CNTL +#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0 +#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c +#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL +#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L +//HUBPREQ1_DCN_SURF0_TTU_CNTL0 +#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 
0x0F000000L +#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ1_DCN_SURF0_TTU_CNTL1 +#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ1_DCN_SURF1_TTU_CNTL0 +#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L +#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ1_DCN_SURF1_TTU_CNTL1 +#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ1_DCN_CUR0_TTU_CNTL0 +#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L +#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ1_DCN_CUR0_TTU_CNTL1 +#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ1_DCN_CUR1_TTU_CNTL0 +#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L +#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ1_DCN_CUR1_TTU_CNTL1 +#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ1_BLANK_OFFSET_0 +#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0 +#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10 +#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL +#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L +//HUBPREQ1_BLANK_OFFSET_1 +#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0 +#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL +//HUBPREQ1_DST_DIMENSIONS +#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0 +#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL +//HUBPREQ1_DST_AFTER_SCALER +#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0 +#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10 +#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL +#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L +//HUBPREQ1_PREFETCH_SETTINGS +#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0 +#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18 +#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL +#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L +//HUBPREQ1_PREFETCH_SETTINGS_C +#define 
HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0 +#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL +//HUBPREQ1_VBLANK_PARAMETERS_0 +#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0 +#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8 +#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL +#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L +//HUBPREQ1_VBLANK_PARAMETERS_1 +#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0 +#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL +//HUBPREQ1_VBLANK_PARAMETERS_2 +#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0 +#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL +//HUBPREQ1_VBLANK_PARAMETERS_3 +#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0 +#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL +//HUBPREQ1_VBLANK_PARAMETERS_4 +#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0 +#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL +//HUBPREQ1_FLIP_PARAMETERS_0 +#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0 +#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8 +#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL +#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L +//HUBPREQ1_FLIP_PARAMETERS_2 +#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0 +#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL +//HUBPREQ1_NOM_PARAMETERS_4 +#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0 +#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL +//HUBPREQ1_NOM_PARAMETERS_5 +#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0 +#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL +//HUBPREQ1_NOM_PARAMETERS_6 +#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0 +#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL +//HUBPREQ1_NOM_PARAMETERS_7 +#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0 +#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL +//HUBPREQ1_PER_LINE_DELIVERY_PRE +#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0 +#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10 +#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL +#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L +//HUBPREQ1_PER_LINE_DELIVERY +#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0 +#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10 +#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL +#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L +//HUBPREQ1_CURSOR_SETTINGS +#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0 +#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8 +#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10 +#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18 +#define 
HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL +#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L +#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L +#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L +//HUBPREQ1_REF_FREQ_TO_PIX_FREQ +#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0 +#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL +//HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT +#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0 +#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL +//HUBPREQ1_HUBPREQ_MEM_PWR_CTRL +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0 +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2 +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4 +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6 +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8 +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L +#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L +//HUBPREQ1_HUBPREQ_MEM_PWR_STATUS +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0 +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2 +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4 +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6 +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L +#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L + + +// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec +//HUBPRET1_HUBPRET_CONTROL +#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0 +#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc +#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10 +#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12 +#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14 +#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16 +#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18 +#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL +#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L +#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L +#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L +#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L +#define
HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L +#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L +//HUBPRET1_HUBPRET_MEM_PWR_CTRL +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0 +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2 +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4 +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8 +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10 +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12 +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14 +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L +#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L +//HUBPRET1_HUBPRET_MEM_PWR_STATUS +#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0 +#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2 +#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4 +#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L +#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL +#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L +//HUBPRET1_HUBPRET_READ_LINE_CTRL0 +#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0 +#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10 +#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL +#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L +//HUBPRET1_HUBPRET_READ_LINE_CTRL1 +#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0 +#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10 +#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL +#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L +//HUBPRET1_HUBPRET_READ_LINE0 +#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0 +#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10 +#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL +#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L +//HUBPRET1_HUBPRET_READ_LINE1 +#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0 +#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10 +#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL +#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L +//HUBPRET1_HUBPRET_INTERRUPT +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2 +#define 
HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12 +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L +#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L +//HUBPRET1_HUBPRET_READ_LINE_VALUE +#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0 +#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10 +#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL +#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L +//HUBPRET1_HUBPRET_READ_LINE_STATUS +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0 +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4 +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5 +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8 +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L +#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L + + +// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec +//CURSOR0_1_CURSOR_CONTROL +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0 +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8 +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc +#define 
CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10 +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14 +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18 +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L +#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L +//CURSOR0_1_CURSOR_SURFACE_ADDRESS +#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0 +#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH +#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//CURSOR0_1_CURSOR_SIZE +#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0 +#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10 +#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL +#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L +//CURSOR0_1_CURSOR_POSITION +#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0 +#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10 +#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL +#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L +//CURSOR0_1_CURSOR_HOT_SPOT +#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0 +#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10 +#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL +#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L +//CURSOR0_1_CURSOR_STEREO_CONTROL +#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0 +#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4 +#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12 +#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L +#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L +#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L +//CURSOR0_1_CURSOR_DST_OFFSET +#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0 +#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL +//CURSOR0_1_CURSOR_MEM_PWR_CTRL +#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0 +#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2 +#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4 +#define 
CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L +#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L +#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L +//CURSOR0_1_CURSOR_MEM_PWR_STATUS +#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0 +#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L +//CURSOR0_1_DMDATA_ADDRESS_HIGH +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0 +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L +#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L +//CURSOR0_1_DMDATA_ADDRESS_LOW +#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0 +#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL +//CURSOR0_1_DMDATA_CNTL +#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0 +#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1 +#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2 +#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10 +#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L +#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L +#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L +#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L +//CURSOR0_1_DMDATA_QOS_CNTL +#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0 +#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4 +#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10 +#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L +#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L +#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L +//CURSOR0_1_DMDATA_STATUS +#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0 +#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2 +#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4 +#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L +#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L +#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L +//CURSOR0_1_DMDATA_SW_CNTL +#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0 +#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1 +#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10 +#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L +#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L +#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L +//CURSOR0_1_DMDATA_SW_DATA +#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0 +#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL +// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec +//HUBP2_DCSURF_SURFACE_CONFIG +#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0 +#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8 +#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa +#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL +#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L +#define 
HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L +//HUBP2_DCSURF_ADDR_CONFIG +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3 +#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6 +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8 +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa +#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L +#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L +#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L +#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L +//HUBP2_DCSURF_TILING_CONFIG +#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0 +#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7 +#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9 +#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa +#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb +#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL +#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L +#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L +#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L +#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L +//HUBP2_DCSURF_PRI_VIEWPORT_START +#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0 +#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10 +#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL +#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L +//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0 +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10 +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L +//HUBP2_DCSURF_PRI_VIEWPORT_START_C +#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0 +#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10 +#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL +#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L +//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0 +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10 +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL +#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L +//HUBP2_DCSURF_SEC_VIEWPORT_START +#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0 +#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10 +#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL +#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L +//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION +#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0 +#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10 +#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL +#define 
HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L +//HUBP2_DCSURF_SEC_VIEWPORT_START_C +#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0 +#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10 +#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL +#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L +//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C +#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0 +#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10 +#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL +#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L +//HUBP2_DCHUBP_REQ_SIZE_CONFIG +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L +//HUBP2_DCHUBP_REQ_SIZE_CONFIG_C +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18 +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L +#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L +//HUBP2_DCHUBP_CNTL +#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0 +#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1 +#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2 +#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3 +#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4 +#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8 +#define 
HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9 +#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc +#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd +#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10 +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14 +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18 +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b +#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c +#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f +#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L +#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L +#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L +#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L +#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L +#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L +#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L +#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L +#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L +#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L +#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L +#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L +#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L +//HUBP2_HUBP_CLK_CNTL +#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0 +#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4 +#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8 +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10 +#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14 +#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15 +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16 +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17 +#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L +#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L +//HUBP2_HUBPREQ_DEBUG_DB +#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0 +#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL +//HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0 +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4 +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14 +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c +#define 
HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L +//HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0 +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1 +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4 +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14 +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L +#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L + + +// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec +//HUBPREQ2_DCSURF_SURFACE_PITCH +#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10 +#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL +#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L +//HUBPREQ2_DCSURF_SURFACE_PITCH_C +#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10 +#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL +#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L +//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS +#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH +#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C +#define 
HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SURFACE_CONTROL +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10 +#define 
HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13 +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L +#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L +//HUBPREQ2_DCSURF_FLIP_CONTROL +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14 +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L +#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L +//HUBPREQ2_DCSURF_FLIP_CONTROL2 +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0 +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8 +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9 +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc +#define 
HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L +#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L +//HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13 +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L +#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L +//HUBPREQ2_DCSURF_SURFACE_INUSE +#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH +#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SURFACE_INUSE_C +#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C +#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE +#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH +#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL +//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C +#define 
HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL +//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C +#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0 +#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL + +//HUBPREQ2_DCN_EXPANSION_MODE +#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0 +#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2 +#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4 +#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6 +#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L +#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL +#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L +#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L +//HUBPREQ2_DCN_TTU_QOS_WM +#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0 +#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10 +#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL +#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L +//HUBPREQ2_DCN_GLOBAL_TTU_CNTL +#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0 +#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c +#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL +#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L +//HUBPREQ2_DCN_SURF0_TTU_CNTL0 +#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L +#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ2_DCN_SURF0_TTU_CNTL1 +#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ2_DCN_SURF1_TTU_CNTL0 +#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L +#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ2_DCN_SURF1_TTU_CNTL1 +#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ2_DCN_CUR0_TTU_CNTL0 +#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L +#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ2_DCN_CUR0_TTU_CNTL1 +#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define 
HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ2_DCN_CUR1_TTU_CNTL0 +#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0 +#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18 +#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c +#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL +#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L +#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L +//HUBPREQ2_DCN_CUR1_TTU_CNTL1 +#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0 +#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL +//HUBPREQ2_BLANK_OFFSET_0 +#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0 +#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10 +#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL +#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L +//HUBPREQ2_BLANK_OFFSET_1 +#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0 +#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL +//HUBPREQ2_DST_DIMENSIONS +#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0 +#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL +//HUBPREQ2_DST_AFTER_SCALER +#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0 +#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10 +#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL +#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L +//HUBPREQ2_PREFETCH_SETTINGS +#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0 +#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18 +#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL +#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L +//HUBPREQ2_PREFETCH_SETTINGS_C +#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0 +#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL +//HUBPREQ2_VBLANK_PARAMETERS_0 +#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0 +#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8 +#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL +#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L +//HUBPREQ2_VBLANK_PARAMETERS_1 +#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0 +#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL +//HUBPREQ2_VBLANK_PARAMETERS_2 +#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0 +#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL +//HUBPREQ2_VBLANK_PARAMETERS_3 +#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0 +#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL +//HUBPREQ2_VBLANK_PARAMETERS_4 +#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0 +#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL +//HUBPREQ2_FLIP_PARAMETERS_0 +#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0 +#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8 +#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL +#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L 
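Aside (illustrative only, not part of this patch): every definition in these generated headers follows one naming scheme, where a field FIELD of register REG gets a REG__FIELD__SHIFT bit offset and a REG__FIELD_MASK bit mask, and a field value is recovered by masking and then shifting. A minimal C sketch of that pattern; GET_FIELD/SET_FIELD are hypothetical helpers written for this example (amdgpu's own code uses its own equivalent wrappers):

#include <stdint.h>

/* Extract a field: keep only its bits, then shift them down to bit 0. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Read-modify-write a field: clear its bits, then insert the new value. */
#define SET_FIELD(val, reg, field, fval) \
	(((val) & ~reg##__##field##_MASK) | \
	 (((uint32_t)(fval) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

/* Using the HUBPREQ2_FLIP_PARAMETERS_0 definitions directly above:
 * DST_Y_PER_VM_FLIP sits in bits 4:0, DST_Y_PER_ROW_FLIP in bits 13:8. */
static inline uint32_t dst_y_per_vm_flip(uint32_t flip_params0)
{
	return GET_FIELD(flip_params0, HUBPREQ2_FLIP_PARAMETERS_0,
			 DST_Y_PER_VM_FLIP);
}
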
+//HUBPREQ2_FLIP_PARAMETERS_2 +#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0 +#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL +//HUBPREQ2_NOM_PARAMETERS_4 +#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0 +#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL +//HUBPREQ2_NOM_PARAMETERS_5 +#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0 +#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL +//HUBPREQ2_NOM_PARAMETERS_6 +#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0 +#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL +//HUBPREQ2_NOM_PARAMETERS_7 +#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0 +#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL +//HUBPREQ2_PER_LINE_DELIVERY_PRE +#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0 +#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10 +#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL +#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L +//HUBPREQ2_PER_LINE_DELIVERY +#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0 +#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10 +#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL +#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L +//HUBPREQ2_CURSOR_SETTINGS +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0 +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8 +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10 +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18 +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L +#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L +//HUBPREQ2_REF_FREQ_TO_PIX_FREQ +#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0 +#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL +//HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT +#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0 +#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL +//HUBPREQ2_HUBPREQ_MEM_PWR_CTRL +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0 +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2 +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4 +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6 +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8 +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L 
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L +#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L +//HUBPREQ2_HUBPREQ_MEM_PWR_STATUS +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0 +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2 +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4 +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6 +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L +#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L +// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec +//HUBPRET2_HUBPRET_CONTROL +#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0 +#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10 +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12 +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14 +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16 +#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18 +#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL +#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L +#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L +#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L +//HUBPRET2_HUBPRET_MEM_PWR_CTRL +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0 +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2 +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4 +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8 +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10 +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12 +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14 +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L +#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L +//HUBPRET2_HUBPRET_MEM_PWR_STATUS +#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0 +#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2 +#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4 +#define 
HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L +#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL +#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L +//HUBPRET2_HUBPRET_READ_LINE_CTRL0 +#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0 +#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10 +#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL +#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L +//HUBPRET2_HUBPRET_READ_LINE_CTRL1 +#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0 +#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10 +#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL +#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L +//HUBPRET2_HUBPRET_READ_LINE0 +#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0 +#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10 +#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL +#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L +//HUBPRET2_HUBPRET_READ_LINE1 +#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0 +#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10 +#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL +#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L +//HUBPRET2_HUBPRET_INTERRUPT +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12 +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L +#define 
HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L +#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L +//HUBPRET2_HUBPRET_READ_LINE_VALUE +#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0 +#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10 +#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL +#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L +//HUBPRET2_HUBPRET_READ_LINE_STATUS +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0 +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4 +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5 +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8 +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L +#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L +// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec +//CURSOR0_2_CURSOR_CONTROL +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0 +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4 +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8 +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10 +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14 +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18 +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L +#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L +//CURSOR0_2_CURSOR_SURFACE_ADDRESS +#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0 +#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL +//CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH +#define 
CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0 +#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL +//CURSOR0_2_CURSOR_SIZE +#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0 +#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10 +#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL +#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L +//CURSOR0_2_CURSOR_POSITION +#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0 +#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10 +#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL +#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L +//CURSOR0_2_CURSOR_HOT_SPOT +#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0 +#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10 +#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL +#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L +//CURSOR0_2_CURSOR_STEREO_CONTROL +#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0 +#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4 +#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12 +#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L +#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L +#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L +//CURSOR0_2_CURSOR_DST_OFFSET +#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0 +#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL +//CURSOR0_2_CURSOR_MEM_PWR_CTRL +#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0 +#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2 +#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4 +#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L +#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L +#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L +//CURSOR0_2_CURSOR_MEM_PWR_STATUS +#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0 +#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L +//CURSOR0_2_DMDATA_ADDRESS_HIGH +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0 +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L +#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L +//CURSOR0_2_DMDATA_ADDRESS_LOW +#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0 +#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL +//CURSOR0_2_DMDATA_CNTL +#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0 +#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1 +#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2 +#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10 +#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L +#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L +#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_QOS_CNTL
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_2_DMDATA_STATUS
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_2_DMDATA_SW_CNTL
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_SW_DATA
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+//HUBP3_DCSURF_SURFACE_CONFIG
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP3_DCSURF_ADDR_CONFIG
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP3_DCSURF_TILING_CONFIG
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP3_DCSURF_PRI_VIEWPORT_START
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP3_DCHUBP_CNTL
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP3_HUBP_CLK_CNTL
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP3_HUBPREQ_DEBUG_DB
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+//HUBPREQ3_DCSURF_SURFACE_PITCH
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_CONTROL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL2
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCN_EXPANSION_MODE
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ3_DCN_TTU_QOS_WM
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ3_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_BLANK_OFFSET_0
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ3_BLANK_OFFSET_1
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ3_DST_DIMENSIONS
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ3_DST_AFTER_SCALER
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ3_PREFETCH_SETTINGS
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ3_PREFETCH_SETTINGS_C
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ3_VBLANK_PARAMETERS_1
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_2
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_3
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_4
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ3_FLIP_PARAMETERS_2
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_4
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_5
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_6
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_7
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_PER_LINE_DELIVERY_PRE
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ3_PER_LINE_DELIVERY
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ3_CURSOR_SETTINGS
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ3_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ3_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ3_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+//HUBPRET3_HUBPRET_CONTROL
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET3_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET3_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET3_HUBPRET_READ_LINE0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE1
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_INTERRUPT
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET3_HUBPRET_READ_LINE_VALUE
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_STATUS
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+//CURSOR0_3_CURSOR_CONTROL
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_3_CURSOR_SIZE
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_3_CURSOR_POSITION
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_3_CURSOR_HOT_SPOT
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_3_CURSOR_STEREO_CONTROL
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_3_CURSOR_DST_OFFSET
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_3_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_3_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_3_DMDATA_ADDRESS_HIGH
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_3_DMDATA_ADDRESS_LOW
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_3_DMDATA_CNTL
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_QOS_CNTL
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_3_DMDATA_STATUS
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_3_DMDATA_SW_CNTL
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_SW_DATA
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+//DPP_TOP0_DPP_CONTROL
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP0_DPP_SOFT_RESET
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP0_DPP_CRC_VAL_R_G
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_VAL_B_A
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_CTRL
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP0_HOST_READ_CONTROL
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG0_FORMAT_CONTROL
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG0_FCNV_FP_BIAS_R
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_G
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_B
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_R
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_G
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_B
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG0_COLOR_KEYER_CONTROL
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG0_COLOR_KEYER_ALPHA
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_RED
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L +//CNVC_CFG0_COLOR_KEYER_GREEN +#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0 +#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10 +#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL +#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L +//CNVC_CFG0_COLOR_KEYER_BLUE +#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0 +#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10 +#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL +#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L +//CNVC_CFG0_ALPHA_2BIT_LUT +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0 +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8 +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10 +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18 +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L +#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L + + +// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec +//CNVC_CUR0_CURSOR0_CONTROL +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0 +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1 +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2 +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3 +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4 +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7 +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10 +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L +#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L +//CNVC_CUR0_CURSOR0_COLOR0 +#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0 +#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL +//CNVC_CUR0_CURSOR0_COLOR1 +#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0 +#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL +//CNVC_CUR0_CURSOR0_FP_SCALE_BIAS +#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0 +#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10 +#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL +#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L + + +// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec +//DSCL0_SCL_COEF_RAM_TAP_SELECT +#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0 +#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8 +#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10 +#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L +#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L +#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L +//DSCL0_SCL_COEF_RAM_TAP_DATA +#define 
DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0 +#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf +#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10 +#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f +#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL +#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L +#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L +#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L +//DSCL0_SCL_MODE +#define DSCL0_SCL_MODE__DSCL_MODE__SHIFT 0x0 +#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8 +#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc +#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10 +#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14 +#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18 +#define DSCL0_SCL_MODE__DSCL_MODE_MASK 0x00000007L +#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L +#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L +#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L +#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L +#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L +//DSCL0_SCL_TAP_CONTROL +#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0 +#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4 +#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8 +#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc +#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L +#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L +#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L +#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L +//DSCL0_DSCL_CONTROL +#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0 +#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L +//DSCL0_DSCL_2TAP_CONTROL +#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0 +#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4 +#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8 +#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10 +#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14 +#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18 +#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L +#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L +#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L +#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L +#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L +#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L +//DSCL0_SCL_MANUAL_REPLICATE_CONTROL +#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0 +#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8 +#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL +#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L +//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO +#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0 +#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL0_SCL_HORZ_FILTER_INIT +#define 
DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0 +#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18 +#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L +//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C +#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL +//DSCL0_SCL_HORZ_FILTER_INIT_C +#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0 +#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18 +#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L +//DSCL0_SCL_VERT_FILTER_SCALE_RATIO +#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0 +#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL0_SCL_VERT_FILTER_INIT +#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0 +#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18 +#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L +//DSCL0_SCL_VERT_FILTER_INIT_BOT +#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0 +#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18 +#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL +#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L +//DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C +#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL +//DSCL0_SCL_VERT_FILTER_INIT_C +#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0 +#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18 +#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L +//DSCL0_SCL_VERT_FILTER_INIT_BOT_C +#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0 +#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18 +#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL +#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L +//DSCL0_SCL_BLACK_OFFSET +#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0 +#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10 +#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL +#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L +//DSCL0_DSCL_UPDATE +#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0 +#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L +//DSCL0_DSCL_AUTOCAL +#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0 +#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8 +#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc +#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L +#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L +#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L +//DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT +#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0 +#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10 +#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL +#define 
DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L +//DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM +#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0 +#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10 +#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL +#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L +//DSCL0_OTG_H_BLANK +#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0 +#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10 +#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL +#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L +//DSCL0_OTG_V_BLANK +#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0 +#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10 +#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL +#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L +//DSCL0_RECOUT_START +#define DSCL0_RECOUT_START__RECOUT_START_X__SHIFT 0x0 +#define DSCL0_RECOUT_START__RECOUT_START_Y__SHIFT 0x10 +#define DSCL0_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL +#define DSCL0_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L +//DSCL0_RECOUT_SIZE +#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0 +#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10 +#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL +#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L +//DSCL0_MPC_SIZE +#define DSCL0_MPC_SIZE__MPC_WIDTH__SHIFT 0x0 +#define DSCL0_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10 +#define DSCL0_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL +#define DSCL0_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L +//DSCL0_LB_DATA_FORMAT +#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0 +#define DSCL0_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4 +#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L +#define DSCL0_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L +//DSCL0_LB_MEMORY_CTRL +#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0 +#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8 +#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10 +#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18 +#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L +#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L +#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L +#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L +//DSCL0_LB_V_COUNTER +#define DSCL0_LB_V_COUNTER__V_COUNTER__SHIFT 0x0 +#define DSCL0_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10 +#define DSCL0_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL +#define DSCL0_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L +//DSCL0_DSCL_MEM_PWR_CTRL +#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0 +#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16 +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18 +#define 
DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c +#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L +#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L +//DSCL0_DSCL_MEM_PWR_STATUS +#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0 +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2 +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4 +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6 +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8 +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc +#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L +#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L +//DSCL0_OBUF_CONTROL +#define DSCL0_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0 +#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4 +#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc +#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c +#define DSCL0_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L +#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L +#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L +#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L +//DSCL0_OBUF_MEM_PWR_CTRL +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0 +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8 +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10 +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L +#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L + + +// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec +//CM0_CM_CONTROL +#define CM0_CM_CONTROL__CM_BYPASS__SHIFT 0x0 +#define CM0_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8 +#define CM0_CM_CONTROL__CM_BYPASS_MASK 0x00000001L +#define CM0_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L +//CM0_CM_ICSC_CONTROL 
+#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0 +#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L +//CM0_CM_ICSC_C11_C12 +#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0 +#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10 +#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL +#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L +//CM0_CM_ICSC_C13_C14 +#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0 +#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10 +#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL +#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L +//CM0_CM_ICSC_C21_C22 +#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0 +#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10 +#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL +#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L +//CM0_CM_ICSC_C23_C24 +#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0 +#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10 +#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL +#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L +//CM0_CM_ICSC_C31_C32 +#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0 +#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10 +#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL +#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L +//CM0_CM_ICSC_C33_C34 +#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0 +#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10 +#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL +#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L +//CM0_CM_ICSC_B_C11_C12 +#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0 +#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10 +#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL +#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L +//CM0_CM_ICSC_B_C13_C14 +#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0 +#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10 +#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL +#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L +//CM0_CM_ICSC_B_C21_C22 +#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0 +#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10 +#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL +#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L +//CM0_CM_ICSC_B_C23_C24 +#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0 +#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10 +#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL +#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L +//CM0_CM_ICSC_B_C31_C32 +#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0 +#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10 +#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL +#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L +//CM0_CM_ICSC_B_C33_C34 +#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0 +#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10 +#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL +#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_CONTROL +#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L +//CM0_CM_GAMUT_REMAP_C11_C12 +#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10 +#define 
CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_C13_C14 +#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_C21_C22 +#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_C23_C24 +#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_C31_C32 +#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_C33_C34 +#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_B_C11_C12 +#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_B_C13_C14 +#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_B_C21_C22 +#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_B_C23_C24 +#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_B_C31_C32 +#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10 +#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L +//CM0_CM_GAMUT_REMAP_B_C33_C34 +#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0 +#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10 +#define 
CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL +#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L +//CM0_CM_BIAS_CR_R +#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0 +#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL +//CM0_CM_BIAS_Y_G_CB_B +#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0 +#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10 +#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL +#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L +//CM0_CM_DGAM_CONTROL +#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0 +#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L +//CM0_CM_DGAM_LUT_INDEX +#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0 +#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL +//CM0_CM_DGAM_LUT_DATA +#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0 +#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL +//CM0_CM_DGAM_LUT_WRITE_EN_MASK +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4 +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8 +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L +#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L +//CM0_CM_DGAM_RAMA_START_CNTL_B +#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM0_CM_DGAM_RAMA_START_CNTL_G +#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM0_CM_DGAM_RAMA_START_CNTL_R +#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//CM0_CM_DGAM_RAMA_SLOPE_CNTL_B +#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//CM0_CM_DGAM_RAMA_SLOPE_CNTL_G +#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//CM0_CM_DGAM_RAMA_SLOPE_CNTL_R +#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//CM0_CM_DGAM_RAMA_END_CNTL1_B +#define 
CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL +//CM0_CM_DGAM_RAMA_END_CNTL2_B +#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//CM0_CM_DGAM_RAMA_END_CNTL1_G +#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL +//CM0_CM_DGAM_RAMA_END_CNTL2_G +#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//CM0_CM_DGAM_RAMA_END_CNTL1_R +#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL +//CM0_CM_DGAM_RAMA_END_CNTL2_R +#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//CM0_CM_DGAM_RAMA_REGION_0_1 +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMA_REGION_2_3 +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMA_REGION_4_5 +#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define 
CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMA_REGION_6_7 +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMA_REGION_8_9 +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMA_REGION_10_11 +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMA_REGION_12_13 +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMA_REGION_14_15 +#define 
CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_START_CNTL_B +#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM0_CM_DGAM_RAMB_START_CNTL_G +#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM0_CM_DGAM_RAMB_START_CNTL_R +#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL +#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//CM0_CM_DGAM_RAMB_SLOPE_CNTL_B +#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//CM0_CM_DGAM_RAMB_SLOPE_CNTL_G +#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//CM0_CM_DGAM_RAMB_SLOPE_CNTL_R +#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//CM0_CM_DGAM_RAMB_END_CNTL1_B +#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL +//CM0_CM_DGAM_RAMB_END_CNTL2_B +#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//CM0_CM_DGAM_RAMB_END_CNTL1_G +#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL +//CM0_CM_DGAM_RAMB_END_CNTL2_G +#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10 
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//CM0_CM_DGAM_RAMB_END_CNTL1_R +#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL +//CM0_CM_DGAM_RAMB_END_CNTL2_R +#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//CM0_CM_DGAM_RAMB_REGION_0_1 +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_REGION_2_3 +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_REGION_4_5 +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_REGION_6_7 +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 
0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_REGION_8_9 +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_REGION_10_11 +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_REGION_12_13 +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_DGAM_RAMB_REGION_14_15 +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_BLNDGAM_CONTROL +#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0 +#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L +//CM0_CM_BLNDGAM_LUT_INDEX +#define CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0 +#define 
CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL +//CM0_CM_BLNDGAM_LUT_DATA +#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0 +#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL +//CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK +#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4 +#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8 +#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L +#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L +//CM0_CM_BLNDGAM_RAMA_START_CNTL_B +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM0_CM_BLNDGAM_RAMA_START_CNTL_G +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM0_CM_BLNDGAM_RAMA_START_CNTL_R +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B +#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G +#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R +#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//CM0_CM_BLNDGAM_RAMA_END_CNTL1_B +#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL +//CM0_CM_BLNDGAM_RAMA_END_CNTL2_B +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//CM0_CM_BLNDGAM_RAMA_END_CNTL1_G +#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0 +#define 
CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL +//CM0_CM_BLNDGAM_RAMA_END_CNTL2_G +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//CM0_CM_BLNDGAM_RAMA_END_CNTL1_R +#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL +//CM0_CM_BLNDGAM_RAMA_END_CNTL2_R +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//CM0_CM_BLNDGAM_RAMA_REGION_0_1 +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_BLNDGAM_RAMA_REGION_2_3 +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_BLNDGAM_RAMA_REGION_4_5 +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//CM0_CM_BLNDGAM_RAMA_REGION_6_7 +#define 
CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_HDR_MULT_COEF
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM0_CM_MEM_PWR_CTRL
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM0_CM_MEM_PWR_STATUS
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM0_CM_DEALPHA
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM0_CM_COEF_FORMAT
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_SHAPER_CONTROL
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM0_CM_SHAPER_OFFSET_R
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_G
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_B
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_SCALE_R
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM0_CM_SHAPER_SCALE_G_B
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_SHAPER_LUT_INDEX
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM0_CM_SHAPER_LUT_DATA
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM0_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM0_CM_SHAPER_RAMA_START_CNTL_B
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_G
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_R
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_B
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_G
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_R
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_REGION_0_1
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_2_3
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_4_5
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_6_7
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_8_9
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_10_11
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_12_13
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_14_15
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_16_17
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_18_19
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_20_21
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_22_23
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_24_25
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_26_27
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_28_29
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_30_31
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_32_33
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_B
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_G
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_R
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_B
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_G
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_R
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_REGION_0_1
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_2_3
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_4_5
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_6_7
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_8_9
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_10_11
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_12_13
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_14_15
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_16_17
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_18_19
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_20_21
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_22_23
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_24_25
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_26_27
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_28_29
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_30_31
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_32_33
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_MEM_PWR_CTRL2
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM0_CM_MEM_PWR_STATUS2
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM0_CM_3DLUT_MODE
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM0_CM_3DLUT_INDEX
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM0_CM_3DLUT_DATA
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_DATA_30BIT
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM0_CM_3DLUT_READ_WRITE_CONTROL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM0_CM_3DLUT_OUT_NORM_FACTOR +#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0 +#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL +//CM0_CM_3DLUT_OUT_OFFSET_R +#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0 +#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10 +#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL +#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L +//CM0_CM_3DLUT_OUT_OFFSET_G +#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0 +#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10 +#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL +#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L +//CM0_CM_3DLUT_OUT_OFFSET_B +#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0 +#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 +#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL +#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_DATA +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL +// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec +//DPP_TOP1_DPP_CONTROL +#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4 +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8 +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe +#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10 +#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12 +#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14 +#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L +#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L +#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L +#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L +#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L +//DPP_TOP1_DPP_SOFT_RESET +#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0 +#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4 +#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8 +#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc +#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L +#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L +#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L +#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L +//DPP_TOP1_DPP_CRC_VAL_R_G +#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0 +#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10 +#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL +#define 
DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L +//DPP_TOP1_DPP_CRC_VAL_B_A +#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0 +#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10 +#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL +#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L +//DPP_TOP1_DPP_CRC_CTRL +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10 +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L +#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L +//DPP_TOP1_HOST_READ_CONTROL +#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0 +#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL + + +// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec +//CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT +#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0 +#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL +//CNVC_CFG1_FORMAT_CONTROL +#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0 +#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4 +#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8 +#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc +#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd +#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10 +#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11 +#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14 +#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L +#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L +#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L +#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L +#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L +#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L +#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L +#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L +//CNVC_CFG1_FCNV_FP_BIAS_R +#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0 
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL +//CNVC_CFG1_FCNV_FP_BIAS_G +#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0 +#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL +//CNVC_CFG1_FCNV_FP_BIAS_B +#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0 +#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL +//CNVC_CFG1_FCNV_FP_SCALE_R +#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0 +#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL +//CNVC_CFG1_FCNV_FP_SCALE_G +#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0 +#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL +//CNVC_CFG1_FCNV_FP_SCALE_B +#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0 +#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL +//CNVC_CFG1_COLOR_KEYER_CONTROL +#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0 +#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4 +#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L +#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L +//CNVC_CFG1_COLOR_KEYER_ALPHA +#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0 +#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10 +#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL +#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L +//CNVC_CFG1_COLOR_KEYER_RED +#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0 +#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10 +#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL +#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L +//CNVC_CFG1_COLOR_KEYER_GREEN +#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0 +#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10 +#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL +#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L +//CNVC_CFG1_COLOR_KEYER_BLUE +#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0 +#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10 +#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL +#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L +//CNVC_CFG1_ALPHA_2BIT_LUT +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0 +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8 +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10 +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18 +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L +#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L + + +// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec +//CNVC_CUR1_CURSOR0_CONTROL +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0 +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1 +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2 +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3 +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4 +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7 +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10 
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L +#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L +//CNVC_CUR1_CURSOR0_COLOR0 +#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0 +#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL +//CNVC_CUR1_CURSOR0_COLOR1 +#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0 +#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL +//CNVC_CUR1_CURSOR0_FP_SCALE_BIAS +#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0 +#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10 +#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL +#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L + + +// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec +//DSCL1_SCL_COEF_RAM_TAP_SELECT +#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0 +#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8 +#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10 +#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L +#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L +#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L +//DSCL1_SCL_COEF_RAM_TAP_DATA +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0 +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10 +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L +#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L +//DSCL1_SCL_MODE +#define DSCL1_SCL_MODE__DSCL_MODE__SHIFT 0x0 +#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8 +#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc +#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10 +#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14 +#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18 +#define DSCL1_SCL_MODE__DSCL_MODE_MASK 0x00000007L +#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L +#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L +#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L +#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L +#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L +//DSCL1_SCL_TAP_CONTROL +#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0 +#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4 +#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8 +#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc +#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L +#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L +#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L +#define 
DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L +//DSCL1_DSCL_CONTROL +#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0 +#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L +//DSCL1_DSCL_2TAP_CONTROL +#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0 +#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4 +#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8 +#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10 +#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14 +#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18 +#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L +#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L +#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L +#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L +#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L +#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L +//DSCL1_SCL_MANUAL_REPLICATE_CONTROL +#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0 +#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8 +#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL +#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L +//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO +#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0 +#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL1_SCL_HORZ_FILTER_INIT +#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0 +#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18 +#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L +//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C +#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL +//DSCL1_SCL_HORZ_FILTER_INIT_C +#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0 +#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18 +#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L +//DSCL1_SCL_VERT_FILTER_SCALE_RATIO +#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0 +#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL1_SCL_VERT_FILTER_INIT +#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0 +#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18 +#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L +//DSCL1_SCL_VERT_FILTER_INIT_BOT +#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0 +#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18 +#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL +#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L +//DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C +#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL +//DSCL1_SCL_VERT_FILTER_INIT_C +#define 
DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0 +#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18 +#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L +//DSCL1_SCL_VERT_FILTER_INIT_BOT_C +#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0 +#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18 +#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL +#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L +//DSCL1_SCL_BLACK_OFFSET +#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0 +#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10 +#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL +#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L +//DSCL1_DSCL_UPDATE +#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0 +#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L +//DSCL1_DSCL_AUTOCAL +#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0 +#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8 +#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc +#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L +#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L +#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L +//DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT +#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0 +#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10 +#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL +#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L +//DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM +#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0 +#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10 +#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL +#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L +//DSCL1_OTG_H_BLANK +#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0 +#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10 +#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL +#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L +//DSCL1_OTG_V_BLANK +#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0 +#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10 +#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL +#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L +//DSCL1_RECOUT_START +#define DSCL1_RECOUT_START__RECOUT_START_X__SHIFT 0x0 +#define DSCL1_RECOUT_START__RECOUT_START_Y__SHIFT 0x10 +#define DSCL1_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL +#define DSCL1_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L +//DSCL1_RECOUT_SIZE +#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0 +#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10 +#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL +#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L +//DSCL1_MPC_SIZE +#define DSCL1_MPC_SIZE__MPC_WIDTH__SHIFT 0x0 +#define DSCL1_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10 +#define DSCL1_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL +#define DSCL1_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L +//DSCL1_LB_DATA_FORMAT +#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0 +#define DSCL1_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4 +#define 
DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L +#define DSCL1_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L +//DSCL1_LB_MEMORY_CTRL +#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0 +#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8 +#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10 +#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18 +#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L +#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L +#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L +#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L +//DSCL1_LB_V_COUNTER +#define DSCL1_LB_V_COUNTER__V_COUNTER__SHIFT 0x0 +#define DSCL1_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10 +#define DSCL1_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL +#define DSCL1_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L +//DSCL1_DSCL_MEM_PWR_CTRL +#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0 +#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18 +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c +#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L +#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L +//DSCL1_DSCL_MEM_PWR_STATUS +#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0 +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2 +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4 +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6 +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8 +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc +#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL +#define 
DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L +#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L +//DSCL1_OBUF_CONTROL +#define DSCL1_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0 +#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4 +#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc +#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c +#define DSCL1_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L +#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L +#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L +#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L +//DSCL1_OBUF_MEM_PWR_CTRL +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0 +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8 +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10 +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L +#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L + + +// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec +//CM1_CM_CONTROL +#define CM1_CM_CONTROL__CM_BYPASS__SHIFT 0x0 +#define CM1_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8 +#define CM1_CM_CONTROL__CM_BYPASS_MASK 0x00000001L +#define CM1_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L +//CM1_CM_ICSC_CONTROL +#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0 +#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L +//CM1_CM_ICSC_C11_C12 +#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0 +#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10 +#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL +#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L +//CM1_CM_ICSC_C13_C14 +#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0 +#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10 +#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL +#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L +//CM1_CM_ICSC_C21_C22 +#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0 +#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10 +#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL +#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L +//CM1_CM_ICSC_C23_C24 +#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0 +#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10 +#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL +#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L +//CM1_CM_ICSC_C31_C32 +#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0 +#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10 +#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL +#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L +//CM1_CM_ICSC_C33_C34 +#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0 +#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10 +#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL +#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L +//CM1_CM_ICSC_B_C11_C12 +#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0 +#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10 +#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL +#define 
CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L +//CM1_CM_ICSC_B_C13_C14 +#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0 +#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10 +#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL +#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L +//CM1_CM_ICSC_B_C21_C22 +#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0 +#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10 +#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL +#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L +//CM1_CM_ICSC_B_C23_C24 +#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0 +#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10 +#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL +#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L +//CM1_CM_ICSC_B_C31_C32 +#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0 +#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10 +#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL +#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L +//CM1_CM_ICSC_B_C33_C34 +#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0 +#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10 +#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL +#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_CONTROL +#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L +//CM1_CM_GAMUT_REMAP_C11_C12 +#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_C13_C14 +#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_C21_C22 +#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_C23_C24 +#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_C31_C32 +#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_C33_C34 +#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_B_C11_C12 +#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0 +#define 
CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_B_C13_C14 +#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_B_C21_C22 +#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_B_C23_C24 +#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_B_C31_C32 +#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L +//CM1_CM_GAMUT_REMAP_B_C33_C34 +#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0 +#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10 +#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL +#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L +//CM1_CM_BIAS_CR_R +#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0 +#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL +//CM1_CM_BIAS_Y_G_CB_B +#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0 +#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10 +#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL +#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L +//CM1_CM_DGAM_CONTROL +#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0 +#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L +//CM1_CM_DGAM_LUT_INDEX +#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0 +#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL +//CM1_CM_DGAM_LUT_DATA +#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0 +#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL +//CM1_CM_DGAM_LUT_WRITE_EN_MASK +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4 +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8 +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L +#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L +//CM1_CM_DGAM_RAMA_START_CNTL_B +#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 
0x14 +#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM1_CM_DGAM_RAMA_START_CNTL_G +#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM1_CM_DGAM_RAMA_START_CNTL_R +#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//CM1_CM_DGAM_RAMA_SLOPE_CNTL_B +#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//CM1_CM_DGAM_RAMA_SLOPE_CNTL_G +#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//CM1_CM_DGAM_RAMA_SLOPE_CNTL_R +#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//CM1_CM_DGAM_RAMA_END_CNTL1_B +#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL +//CM1_CM_DGAM_RAMA_END_CNTL2_B +#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//CM1_CM_DGAM_RAMA_END_CNTL1_G +#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL +//CM1_CM_DGAM_RAMA_END_CNTL2_G +#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//CM1_CM_DGAM_RAMA_END_CNTL1_R +#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL +//CM1_CM_DGAM_RAMA_END_CNTL2_R +#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//CM1_CM_DGAM_RAMA_REGION_0_1 +#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define 
CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMA_REGION_2_3 +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMA_REGION_4_5 +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMA_REGION_6_7 +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMA_REGION_8_9 +#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define 
CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMA_REGION_10_11 +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMA_REGION_12_13 +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMA_REGION_14_15 +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_START_CNTL_B +#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM1_CM_DGAM_RAMB_START_CNTL_G +#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM1_CM_DGAM_RAMB_START_CNTL_R +#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL +#define 
CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//CM1_CM_DGAM_RAMB_SLOPE_CNTL_B +#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//CM1_CM_DGAM_RAMB_SLOPE_CNTL_G +#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//CM1_CM_DGAM_RAMB_SLOPE_CNTL_R +#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//CM1_CM_DGAM_RAMB_END_CNTL1_B +#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL +//CM1_CM_DGAM_RAMB_END_CNTL2_B +#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//CM1_CM_DGAM_RAMB_END_CNTL1_G +#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL +//CM1_CM_DGAM_RAMB_END_CNTL2_G +#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//CM1_CM_DGAM_RAMB_END_CNTL1_R +#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL +//CM1_CM_DGAM_RAMB_END_CNTL2_R +#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//CM1_CM_DGAM_RAMB_REGION_0_1 +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_REGION_2_3 +#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define 
CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_REGION_4_5 +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_REGION_6_7 +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_REGION_8_9 +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_REGION_10_11 +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_REGION_12_13 +#define 
CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_DGAM_RAMB_REGION_14_15 +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//CM1_CM_BLNDGAM_CONTROL +#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0 +#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L +//CM1_CM_BLNDGAM_LUT_INDEX +#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0 +#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL +//CM1_CM_BLNDGAM_LUT_DATA +#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0 +#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL +//CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK +#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4 +#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8 +#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L +#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L +//CM1_CM_BLNDGAM_RAMA_START_CNTL_B +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM1_CM_BLNDGAM_RAMA_START_CNTL_G +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM1_CM_BLNDGAM_RAMA_START_CNTL_R +#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define 
CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
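[Editor's note] Every register in this generated header follows the same convention: a _MASK constant selecting the field's bits and a matching __SHIFT constant giving the field's bit position. As a minimal sketch of how such pairs are consumed (the helper names below are illustrative and not part of this header; amdgpu's own REG_GET_FIELD()/REG_SET_FIELD() macros expand to essentially this pattern):

#include <stdint.h>

/* Read a field out of a register value: mask first, then shift down. */
static inline uint32_t cm_get_field(uint32_t val, uint32_t mask, uint32_t shift)
{
	return (val & mask) >> shift;
}

/* Replace a field in a register value: clear it, then OR in the shifted bits. */
static inline uint32_t cm_set_field(uint32_t val, uint32_t mask, uint32_t shift,
				    uint32_t field)
{
	return (val & ~mask) | ((field << shift) & mask);
}

/* e.g. pull REGION0_NUM_SEGMENTS out of a CM_BLNDGAM_RAMA_REGION_0_1 value */
static uint32_t region0_num_segments(uint32_t reg_val)
{
	return cm_get_field(reg_val,
		CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK,
		CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT);
}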
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_HDR_MULT_COEF
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM1_CM_MEM_PWR_CTRL
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM1_CM_MEM_PWR_STATUS
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
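[Editor's note] CM_MEM_PWR_CTRL pairs a 2-bit *_MEM_PWR_FORCE field with a *_MEM_PWR_DIS bit per memory, and CM_MEM_PWR_STATUS reports the resulting *_MEM_PWR_STATE. A hedged sketch of the handshake this implies; the mm* register offsets come from the companion *_offset.h header, read_reg()/write_reg() are placeholder accessors, and the assumption that a STATE of 0 means fully powered is mine, not this header's:

uint32_t v = read_reg(mmCM1_CM_MEM_PWR_CTRL);
/* keep the blend-gamma LUT RAM powered (disable power gating) */
v = cm_set_field(v, CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK,
		 CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT, 1);
write_reg(mmCM1_CM_MEM_PWR_CTRL, v);

/* wait for the status field to report power-up (0 assumed to mean "on") */
while (cm_get_field(read_reg(mmCM1_CM_MEM_PWR_STATUS),
		    CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK,
		    CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT) != 0)
	; /* real code would bound this poll with a timeout */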
+//CM1_CM_DEALPHA
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM1_CM_COEF_FORMAT
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM1_CM_SHAPER_CONTROL
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM1_CM_SHAPER_OFFSET_R
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_G
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_B
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_SCALE_R
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM1_CM_SHAPER_SCALE_G_B
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM1_CM_SHAPER_LUT_INDEX
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM1_CM_SHAPER_LUT_DATA
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM1_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
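[Editor's note] CM_SHAPER_LUT_INDEX and CM_SHAPER_LUT_DATA form the usual index/data window onto the shaper LUT RAM, with the 3-bit WRITE_EN_MASK field gating the color channels and WRITE_SEL choosing between the two shaper RAMs. A sketch of the access pattern this implies; the mm* offsets, the accessors, and the assumption that the index auto-increments on each DATA write are mine, not this header's:

/* enable writes to all three channels; WRITE_SEL left at 0 to pick one RAM */
write_reg(mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK,
	  7 << CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT);

write_reg(mmCM1_CM_SHAPER_LUT_INDEX, 0);	/* start at entry 0 */
for (unsigned int i = 0; i < nentries; i++)	/* index is 8 bits wide */
	write_reg(mmCM1_CM_SHAPER_LUT_DATA,
		  lut[i] & CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK);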
+//CM1_CM_SHAPER_RAMA_START_CNTL_B
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_G
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_R
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_B
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_G
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_R
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_REGION_0_1
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_2_3
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_4_5
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_6_7
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_8_9
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_10_11
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_12_13
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_14_15
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_16_17
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_18_19
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_20_21
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_22_23
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_24_25
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_26_27
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_28_29
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_30_31
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_32_33
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_B
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_G
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_R
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_B
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_G
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_R
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_REGION_0_1
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_2_3
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_4_5
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_6_7
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_8_9
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_10_11
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_12_13
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_14_15
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_16_17
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_18_19
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_20_21
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_22_23
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_24_25
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_26_27
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_28_29
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_30_31
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_32_33
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_MEM_PWR_CTRL2
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM1_CM_MEM_PWR_STATUS2
+#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define
CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6 +#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L +#define CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L +//CM1_CM_3DLUT_MODE +#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0 +#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4 +#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L +#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L +//CM1_CM_3DLUT_INDEX +#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0 +#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL +//CM1_CM_3DLUT_DATA +#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0 +#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10 +#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL +#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L +//CM1_CM_3DLUT_DATA_30BIT +#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2 +#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL +//CM1_CM_3DLUT_READ_WRITE_CONTROL +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4 +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8 +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10 +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L +#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L +//CM1_CM_3DLUT_OUT_NORM_FACTOR +#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0 +#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL +//CM1_CM_3DLUT_OUT_OFFSET_R +#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0 +#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10 +#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL +#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L +//CM1_CM_3DLUT_OUT_OFFSET_G +#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0 +#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10 +#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL +#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L +//CM1_CM_3DLUT_OUT_OFFSET_B +#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0 +#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 +#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL +#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L +// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec +//DPP_TOP2_DPP_CONTROL +#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4 +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8 +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe +#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10 +#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12 +#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14 +#define 
DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L +#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L +#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L +#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L +#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L +//DPP_TOP2_DPP_SOFT_RESET +#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0 +#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4 +#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8 +#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc +#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L +#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L +#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L +#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L +//DPP_TOP2_DPP_CRC_VAL_R_G +#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0 +#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10 +#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL +#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L +//DPP_TOP2_DPP_CRC_VAL_B_A +#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0 +#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10 +#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL +#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L +//DPP_TOP2_DPP_CRC_CTRL +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10 +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L +#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L +//DPP_TOP2_HOST_READ_CONTROL +#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0 +#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL + + +// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec +//CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT +#define 
CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0 +#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL +//CNVC_CFG2_FORMAT_CONTROL +#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0 +#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4 +#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8 +#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc +#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd +#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10 +#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11 +#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14 +#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L +#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L +#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L +#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L +#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L +#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L +#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L +#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L +//CNVC_CFG2_FCNV_FP_BIAS_R +#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0 +#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL +//CNVC_CFG2_FCNV_FP_BIAS_G +#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0 +#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL +//CNVC_CFG2_FCNV_FP_BIAS_B +#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0 +#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL +//CNVC_CFG2_FCNV_FP_SCALE_R +#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0 +#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL +//CNVC_CFG2_FCNV_FP_SCALE_G +#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0 +#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL +//CNVC_CFG2_FCNV_FP_SCALE_B +#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0 +#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL +//CNVC_CFG2_COLOR_KEYER_CONTROL +#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0 +#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4 +#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L +#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L +//CNVC_CFG2_COLOR_KEYER_ALPHA +#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0 +#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10 +#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL +#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L +//CNVC_CFG2_COLOR_KEYER_RED +#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0 +#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10 +#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL +#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L +//CNVC_CFG2_COLOR_KEYER_GREEN +#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0 +#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10 +#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL +#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L +//CNVC_CFG2_COLOR_KEYER_BLUE +#define 
CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0 +#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10 +#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL +#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L +//CNVC_CFG2_ALPHA_2BIT_LUT +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0 +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8 +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10 +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18 +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L +#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L + + +// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec +//CNVC_CUR2_CURSOR0_CONTROL +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0 +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1 +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2 +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3 +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4 +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7 +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10 +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L +#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L +//CNVC_CUR2_CURSOR0_COLOR0 +#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0 +#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL +//CNVC_CUR2_CURSOR0_COLOR1 +#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0 +#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL +//CNVC_CUR2_CURSOR0_FP_SCALE_BIAS +#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0 +#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10 +#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL +#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L + + +// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec +//DSCL2_SCL_COEF_RAM_TAP_SELECT +#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0 +#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8 +#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10 +#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L +#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L +#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L +//DSCL2_SCL_COEF_RAM_TAP_DATA +#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0 +#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf +#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10 +#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f +#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL +#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L +#define 
DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L +#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L +//DSCL2_SCL_MODE +#define DSCL2_SCL_MODE__DSCL_MODE__SHIFT 0x0 +#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8 +#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc +#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10 +#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14 +#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18 +#define DSCL2_SCL_MODE__DSCL_MODE_MASK 0x00000007L +#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L +#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L +#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L +#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L +#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L +//DSCL2_SCL_TAP_CONTROL +#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0 +#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4 +#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8 +#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc +#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L +#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L +#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L +#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L +//DSCL2_DSCL_CONTROL +#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0 +#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L +//DSCL2_DSCL_2TAP_CONTROL +#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0 +#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4 +#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8 +#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10 +#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14 +#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18 +#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L +#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L +#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L +#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L +#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L +#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L +//DSCL2_SCL_MANUAL_REPLICATE_CONTROL +#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0 +#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8 +#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL +#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L +//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO +#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0 +#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL2_SCL_HORZ_FILTER_INIT +#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0 +#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18 +#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L +//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C +#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL 
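These generated blocks follow one convention throughout: for every register field the header emits a __SHIFT constant (the field's bit position) and a _MASK constant (its bit extent). A minimal sketch of how such a pair is typically consumed in a read-modify-write; read_reg()/write_reg() and the mmDSCL2_SCL_TAP_CONTROL offset are hypothetical stand-ins for the driver's real register accessors, not actual API:

#include <stdint.h>

#define mmDSCL2_SCL_TAP_CONTROL 0x0 /* hypothetical offset, for illustration only */

extern uint32_t read_reg(uint32_t offset);            /* hypothetical MMIO read */
extern void write_reg(uint32_t offset, uint32_t val); /* hypothetical MMIO write */

/* Extract a field: isolate its bits with the mask, shift them down to bit 0. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Insert a field: clear its bits, then OR in the new value, shifted and masked. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Read-modify-write of SCL_V_NUM_TAPS using the pair defined above. */
static void dscl2_set_v_num_taps(uint32_t taps_field)
{
	uint32_t v = read_reg(mmDSCL2_SCL_TAP_CONTROL);

	v = set_field(v, DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK,
		      DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT, taps_field);
	write_reg(mmDSCL2_SCL_TAP_CONTROL, v);
}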
+//DSCL2_SCL_HORZ_FILTER_INIT_C +#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0 +#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18 +#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L +//DSCL2_SCL_VERT_FILTER_SCALE_RATIO +#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0 +#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL2_SCL_VERT_FILTER_INIT +#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0 +#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18 +#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L +//DSCL2_SCL_VERT_FILTER_INIT_BOT +#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0 +#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18 +#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL +#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L +//DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C +#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL +//DSCL2_SCL_VERT_FILTER_INIT_C +#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0 +#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18 +#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L +//DSCL2_SCL_VERT_FILTER_INIT_BOT_C +#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0 +#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18 +#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL +#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L +//DSCL2_SCL_BLACK_OFFSET +#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0 +#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10 +#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL +#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L +//DSCL2_DSCL_UPDATE +#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0 +#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L +//DSCL2_DSCL_AUTOCAL +#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0 +#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8 +#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc +#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L +#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L +#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L +//DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT +#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0 +#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10 +#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL +#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L +//DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM +#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0 +#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10 +#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL +#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L +//DSCL2_OTG_H_BLANK +#define 
DSCL2_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0 +#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10 +#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL +#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L +//DSCL2_OTG_V_BLANK +#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0 +#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10 +#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL +#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L +//DSCL2_RECOUT_START +#define DSCL2_RECOUT_START__RECOUT_START_X__SHIFT 0x0 +#define DSCL2_RECOUT_START__RECOUT_START_Y__SHIFT 0x10 +#define DSCL2_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL +#define DSCL2_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L +//DSCL2_RECOUT_SIZE +#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0 +#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10 +#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL +#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L +//DSCL2_MPC_SIZE +#define DSCL2_MPC_SIZE__MPC_WIDTH__SHIFT 0x0 +#define DSCL2_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10 +#define DSCL2_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL +#define DSCL2_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L +//DSCL2_LB_DATA_FORMAT +#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0 +#define DSCL2_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4 +#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L +#define DSCL2_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L +//DSCL2_LB_MEMORY_CTRL +#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0 +#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8 +#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10 +#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18 +#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L +#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L +#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L +#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L +//DSCL2_LB_V_COUNTER +#define DSCL2_LB_V_COUNTER__V_COUNTER__SHIFT 0x0 +#define DSCL2_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10 +#define DSCL2_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL +#define DSCL2_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L +//DSCL2_DSCL_MEM_PWR_CTRL +#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0 +#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18 +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c +#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L 
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L +#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L +//DSCL2_DSCL_MEM_PWR_STATUS +#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0 +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2 +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4 +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6 +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8 +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc +#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L +#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L +//DSCL2_OBUF_CONTROL +#define DSCL2_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0 +#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4 +#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc +#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c +#define DSCL2_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L +#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L +#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L +#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L +//DSCL2_OBUF_MEM_PWR_CTRL +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0 +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8 +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10 +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L +#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L + + +// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec +//CM2_CM_CONTROL +#define CM2_CM_CONTROL__CM_BYPASS__SHIFT 0x0 +#define CM2_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8 +#define CM2_CM_CONTROL__CM_BYPASS_MASK 0x00000001L +#define CM2_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L +//CM2_CM_ICSC_CONTROL +#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0 +#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L +//CM2_CM_ICSC_C11_C12 +#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0 +#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10 +#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL +#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L +//CM2_CM_ICSC_C13_C14 +#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0 +#define 
CM2_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10 +#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL +#define CM2_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L +//CM2_CM_ICSC_C21_C22 +#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0 +#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10 +#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL +#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L +//CM2_CM_ICSC_C23_C24 +#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0 +#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10 +#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL +#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L +//CM2_CM_ICSC_C31_C32 +#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0 +#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10 +#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL +#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L +//CM2_CM_ICSC_C33_C34 +#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0 +#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10 +#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL +#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L +//CM2_CM_ICSC_B_C11_C12 +#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0 +#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10 +#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL +#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L +//CM2_CM_ICSC_B_C13_C14 +#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0 +#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10 +#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL +#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L +//CM2_CM_ICSC_B_C21_C22 +#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0 +#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10 +#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL +#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L +//CM2_CM_ICSC_B_C23_C24 +#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0 +#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10 +#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL +#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L +//CM2_CM_ICSC_B_C31_C32 +#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0 +#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10 +#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL +#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L +//CM2_CM_ICSC_B_C33_C34 +#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0 +#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10 +#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL +#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_CONTROL +#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L +//CM2_CM_GAMUT_REMAP_C11_C12 +#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_C13_C14 +#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L 
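The CM2_CM_ICSC_* and CM2_CM_GAMUT_REMAP_* coefficient registers above pack two 16-bit matrix entries per 32-bit word: the first coefficient in bits 15:0 (__SHIFT 0x0) and the second in bits 31:16 (__SHIFT 0x10). A sketch of building one such word, assuming c11/c12 have already been converted to the hardware's 16-bit fixed-point encoding:

#include <stdint.h>

/* Pack C11/C12 into the layout of CM2_CM_ICSC_C11_C12 using the generated
 * shift/mask pairs; the fixed-point conversion itself is out of scope here. */
static uint32_t pack_icsc_c11_c12(uint16_t c11, uint16_t c12)
{
	uint32_t v = 0;

	v |= ((uint32_t)c11 << CM2_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT) &
	     CM2_CM_ICSC_C11_C12__CM_ICSC_C11_MASK;
	v |= ((uint32_t)c12 << CM2_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT) &
	     CM2_CM_ICSC_C11_C12__CM_ICSC_C12_MASK;
	return v;
}

The same layout repeats for every _Cxx_Cyy and _B_Cxx_Cyy register of both matrices.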
+//CM2_CM_GAMUT_REMAP_C21_C22 +#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_C23_C24 +#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_C31_C32 +#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_C33_C34 +#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_B_C11_C12 +#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_B_C13_C14 +#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_B_C21_C22 +#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_B_C23_C24 +#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_B_C31_C32 +#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L +//CM2_CM_GAMUT_REMAP_B_C33_C34 +#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0 +#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10 +#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL +#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L +//CM2_CM_BIAS_CR_R +#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0 +#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL +//CM2_CM_BIAS_Y_G_CB_B +#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0 +#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10 +#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL +#define 
CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L +//CM2_CM_DGAM_CONTROL +#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0 +#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L +//CM2_CM_DGAM_LUT_INDEX +#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0 +#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL +//CM2_CM_DGAM_LUT_DATA +#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0 +#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL +//CM2_CM_DGAM_LUT_WRITE_EN_MASK +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4 +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8 +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L +#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L +//CM2_CM_DGAM_RAMA_START_CNTL_B +#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM2_CM_DGAM_RAMA_START_CNTL_G +#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM2_CM_DGAM_RAMA_START_CNTL_R +#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//CM2_CM_DGAM_RAMA_SLOPE_CNTL_B +#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//CM2_CM_DGAM_RAMA_SLOPE_CNTL_G +#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//CM2_CM_DGAM_RAMA_SLOPE_CNTL_R +#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//CM2_CM_DGAM_RAMA_END_CNTL1_B +#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL +//CM2_CM_DGAM_RAMA_END_CNTL2_B +#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define 
CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//CM2_CM_DGAM_RAMA_END_CNTL1_G +#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL +//CM2_CM_DGAM_RAMA_END_CNTL2_G +#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//CM2_CM_DGAM_RAMA_END_CNTL1_R +#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL +//CM2_CM_DGAM_RAMA_END_CNTL2_R +#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//CM2_CM_DGAM_RAMA_REGION_0_1 +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_DGAM_RAMA_REGION_2_3 +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_DGAM_RAMA_REGION_4_5 +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_DGAM_RAMA_REGION_6_7 +#define 
CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_DGAM_RAMA_REGION_8_9 +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_DGAM_RAMA_REGION_10_11 +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_DGAM_RAMA_REGION_12_13 +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_DGAM_RAMA_REGION_14_15 +#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define 
CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_START_CNTL_B
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_G
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_R
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL1_B
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_B
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_G
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_G
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_R
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_R
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_REGION_0_1
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_2_3
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_4_5
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_6_7
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_8_9
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_10_11
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_12_13
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_14_15
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_CONTROL
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM2_CM_BLNDGAM_LUT_INDEX
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_BLNDGAM_LUT_DATA
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_HDR_MULT_COEF
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM2_CM_MEM_PWR_CTRL
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM2_CM_MEM_PWR_STATUS
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM2_CM_DEALPHA
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM2_CM_COEF_FORMAT
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM2_CM_SHAPER_CONTROL
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM2_CM_SHAPER_OFFSET_R
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_G
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_B
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_SCALE_R
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM2_CM_SHAPER_SCALE_G_B
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM2_CM_SHAPER_LUT_INDEX
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM2_CM_SHAPER_LUT_DATA
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM2_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM2_CM_SHAPER_RAMA_START_CNTL_B
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_G
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_R
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_B
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_G
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_R
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_REGION_0_1
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_2_3
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_4_5
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_6_7
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_8_9
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_10_11
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_12_13
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_14_15
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_16_17
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_18_19
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_20_21
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_22_23
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_24_25
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_26_27
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_28_29
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_30_31
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_32_33
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_B
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_G
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_R
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_B
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_G
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_R
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_REGION_0_1
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_2_3
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_4_5
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_6_7
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_8_9
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_10_11
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_12_13
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_14_15
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define
CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_16_17 +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_18_19 +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_20_21 +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_22_23 +#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define 
CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_24_25 +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_26_27 +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_28_29 +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_30_31 +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_SHAPER_RAMB_REGION_32_33 +#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define 
CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L +//CM2_CM_MEM_PWR_CTRL2 +#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8 +#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa +#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc +#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe +#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L +#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L +#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L +#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L +//CM2_CM_MEM_PWR_STATUS2 +#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4 +#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6 +#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L +#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L +//CM2_CM_3DLUT_MODE +#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0 +#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4 +#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L +#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L +//CM2_CM_3DLUT_INDEX +#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0 +#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL +//CM2_CM_3DLUT_DATA +#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0 +#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10 +#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL +#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L +//CM2_CM_3DLUT_DATA_30BIT +#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2 +#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL +//CM2_CM_3DLUT_READ_WRITE_CONTROL +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4 +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8 +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10 +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L +#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L +//CM2_CM_3DLUT_OUT_NORM_FACTOR +#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0 +#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL +//CM2_CM_3DLUT_OUT_OFFSET_R +#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0 +#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10 +#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL +#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L +//CM2_CM_3DLUT_OUT_OFFSET_G 
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0 +#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10 +#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL +#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L +//CM2_CM_3DLUT_OUT_OFFSET_B +#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0 +#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 +#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL +#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L + + +// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec +//DPP_TOP3_DPP_CONTROL +#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4 +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8 +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe +#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10 +#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12 +#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14 +#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L +#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L +#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L +#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L +#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L +//DPP_TOP3_DPP_SOFT_RESET +#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0 +#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4 +#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8 +#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc +#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L +#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L +#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L +#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L +//DPP_TOP3_DPP_CRC_VAL_R_G +#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0 +#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10 +#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL +#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L +//DPP_TOP3_DPP_CRC_VAL_B_A +#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0 +#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10 +#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL +#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L +//DPP_TOP3_DPP_CRC_CTRL +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc +#define 
DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10 +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L +#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L +//DPP_TOP3_HOST_READ_CONTROL +#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0 +#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL + + +// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec +//CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT +#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0 +#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL +//CNVC_CFG3_FORMAT_CONTROL +#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0 +#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4 +#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8 +#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc +#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd +#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10 +#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11 +#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14 +#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L +#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L +#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L +#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L +#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L +#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L +#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L +#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L +//CNVC_CFG3_FCNV_FP_BIAS_R +#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0 +#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL +//CNVC_CFG3_FCNV_FP_BIAS_G +#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0 +#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL +//CNVC_CFG3_FCNV_FP_BIAS_B +#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0 +#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL +//CNVC_CFG3_FCNV_FP_SCALE_R +#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0 +#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL +//CNVC_CFG3_FCNV_FP_SCALE_G +#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0 +#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL +//CNVC_CFG3_FCNV_FP_SCALE_B +#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0 +#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL +//CNVC_CFG3_COLOR_KEYER_CONTROL +#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0 +#define 
CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4 +#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L +#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L +//CNVC_CFG3_COLOR_KEYER_ALPHA +#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0 +#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10 +#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL +#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L +//CNVC_CFG3_COLOR_KEYER_RED +#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0 +#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10 +#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL +#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L +//CNVC_CFG3_COLOR_KEYER_GREEN +#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0 +#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10 +#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL +#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L +//CNVC_CFG3_COLOR_KEYER_BLUE +#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0 +#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10 +#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL +#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L +//CNVC_CFG3_ALPHA_2BIT_LUT +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0 +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8 +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10 +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18 +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L +#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L + + +// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec +//CNVC_CUR3_CURSOR0_CONTROL +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0 +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1 +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2 +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3 +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4 +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7 +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10 +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L +#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L +//CNVC_CUR3_CURSOR0_COLOR0 +#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0 +#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL +//CNVC_CUR3_CURSOR0_COLOR1 +#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0 +#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL +//CNVC_CUR3_CURSOR0_FP_SCALE_BIAS +#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0 +#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10 +#define 
CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL +#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L + + +// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec +//DSCL3_SCL_COEF_RAM_TAP_SELECT +#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0 +#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8 +#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10 +#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L +#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L +#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L +//DSCL3_SCL_COEF_RAM_TAP_DATA +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0 +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10 +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L +#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L +//DSCL3_SCL_MODE +#define DSCL3_SCL_MODE__DSCL_MODE__SHIFT 0x0 +#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8 +#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc +#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10 +#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14 +#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18 +#define DSCL3_SCL_MODE__DSCL_MODE_MASK 0x00000007L +#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L +#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L +#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L +#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L +#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L +//DSCL3_SCL_TAP_CONTROL +#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0 +#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4 +#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8 +#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc +#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L +#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L +#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L +#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L +//DSCL3_DSCL_CONTROL +#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0 +#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L +//DSCL3_DSCL_2TAP_CONTROL +#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0 +#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4 +#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8 +#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10 +#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14 +#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18 +#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L +#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L +#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L +#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L +#define 
DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L +#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L +//DSCL3_SCL_MANUAL_REPLICATE_CONTROL +#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0 +#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8 +#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL +#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L +//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO +#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0 +#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL3_SCL_HORZ_FILTER_INIT +#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0 +#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18 +#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L +//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C +#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL +//DSCL3_SCL_HORZ_FILTER_INIT_C +#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0 +#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18 +#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L +//DSCL3_SCL_VERT_FILTER_SCALE_RATIO +#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0 +#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL +//DSCL3_SCL_VERT_FILTER_INIT +#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0 +#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18 +#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL +#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L +//DSCL3_SCL_VERT_FILTER_INIT_BOT +#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0 +#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18 +#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL +#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L +//DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C +#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0 +#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL +//DSCL3_SCL_VERT_FILTER_INIT_C +#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0 +#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18 +#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL +#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L +//DSCL3_SCL_VERT_FILTER_INIT_BOT_C +#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0 +#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18 +#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL +#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L +//DSCL3_SCL_BLACK_OFFSET +#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0 +#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10 +#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL +#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L +//DSCL3_DSCL_UPDATE +#define 
DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0 +#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L +//DSCL3_DSCL_AUTOCAL +#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0 +#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8 +#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc +#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L +#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L +#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L +//DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT +#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0 +#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10 +#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL +#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L +//DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM +#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0 +#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10 +#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL +#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L +//DSCL3_OTG_H_BLANK +#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0 +#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10 +#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL +#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L +//DSCL3_OTG_V_BLANK +#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0 +#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10 +#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL +#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L +//DSCL3_RECOUT_START +#define DSCL3_RECOUT_START__RECOUT_START_X__SHIFT 0x0 +#define DSCL3_RECOUT_START__RECOUT_START_Y__SHIFT 0x10 +#define DSCL3_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL +#define DSCL3_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L +//DSCL3_RECOUT_SIZE +#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0 +#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10 +#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL +#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L +//DSCL3_MPC_SIZE +#define DSCL3_MPC_SIZE__MPC_WIDTH__SHIFT 0x0 +#define DSCL3_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10 +#define DSCL3_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL +#define DSCL3_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L +//DSCL3_LB_DATA_FORMAT +#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0 +#define DSCL3_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4 +#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L +#define DSCL3_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L +//DSCL3_LB_MEMORY_CTRL +#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0 +#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8 +#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10 +#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18 +#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L +#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L +#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L +#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L +//DSCL3_LB_V_COUNTER +#define DSCL3_LB_V_COUNTER__V_COUNTER__SHIFT 0x0 +#define DSCL3_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10 +#define DSCL3_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL +#define DSCL3_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L +//DSCL3_DSCL_MEM_PWR_CTRL +#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0 +#define 
DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18 +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c +#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L +#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L +//DSCL3_DSCL_MEM_PWR_STATUS +#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0 +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2 +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4 +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6 +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8 +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc +#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L +#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L +//DSCL3_OBUF_CONTROL +#define DSCL3_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0 +#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4 +#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc +#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c +#define DSCL3_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L +#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L +#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L +#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L +//DSCL3_OBUF_MEM_PWR_CTRL +#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0 
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2 +#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8 +#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10 +#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L +#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L +#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L +#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L + + +// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec +//CM3_CM_CONTROL +#define CM3_CM_CONTROL__CM_BYPASS__SHIFT 0x0 +#define CM3_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8 +#define CM3_CM_CONTROL__CM_BYPASS_MASK 0x00000001L +#define CM3_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L +//CM3_CM_ICSC_CONTROL +#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0 +#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L +//CM3_CM_ICSC_C11_C12 +#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0 +#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10 +#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL +#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L +//CM3_CM_ICSC_C13_C14 +#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0 +#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10 +#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL +#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L +//CM3_CM_ICSC_C21_C22 +#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0 +#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10 +#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL +#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L +//CM3_CM_ICSC_C23_C24 +#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0 +#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10 +#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL +#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L +//CM3_CM_ICSC_C31_C32 +#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0 +#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10 +#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL +#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L +//CM3_CM_ICSC_C33_C34 +#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0 +#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10 +#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL +#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L +//CM3_CM_ICSC_B_C11_C12 +#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0 +#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10 +#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL +#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L +//CM3_CM_ICSC_B_C13_C14 +#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0 +#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10 +#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL +#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L +//CM3_CM_ICSC_B_C21_C22 +#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0 +#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10 +#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL +#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L +//CM3_CM_ICSC_B_C23_C24 +#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0 +#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10 +#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL +#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L +//CM3_CM_ICSC_B_C31_C32 +#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0 +#define 
CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10 +#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL +#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L +//CM3_CM_ICSC_B_C33_C34 +#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0 +#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10 +#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL +#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_CONTROL +#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L +//CM3_CM_GAMUT_REMAP_C11_C12 +#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_C13_C14 +#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_C21_C22 +#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_C23_C24 +#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_C31_C32 +#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_C33_C34 +#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_B_C11_C12 +#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_B_C13_C14 +#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_B_C21_C22 +#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_B_C23_C24 +#define 
CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_B_C31_C32 +#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L +//CM3_CM_GAMUT_REMAP_B_C33_C34 +#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0 +#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10 +#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL +#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L +//CM3_CM_BIAS_CR_R +#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0 +#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL +//CM3_CM_BIAS_Y_G_CB_B +#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0 +#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10 +#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL +#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L +//CM3_CM_DGAM_CONTROL +#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0 +#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L +//CM3_CM_DGAM_LUT_INDEX +#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0 +#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL +//CM3_CM_DGAM_LUT_DATA +#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0 +#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL +//CM3_CM_DGAM_LUT_WRITE_EN_MASK +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4 +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8 +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L +#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L +//CM3_CM_DGAM_RAMA_START_CNTL_B +#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM3_CM_DGAM_RAMA_START_CNTL_G +#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM3_CM_DGAM_RAMA_START_CNTL_R +#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define 
CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL1_B
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_B
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_G
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_G
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_R
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_R
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_REGION_0_1
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_2_3
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_4_5
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_6_7
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_8_9
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_10_11
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_12_13
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_14_15
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_START_CNTL_B
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_G
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_R
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL1_B
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
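/*
 * [Editorial sketch -- not part of this patch.]  Every register field in the
 * generated header above is described by a __SHIFT/_MASK macro pair.  A
 * minimal, self-contained illustration of how such a pair is typically
 * consumed; field_get()/field_set() are hypothetical names, and the same
 * operations are what the driver's field-access helpers expand to:
 */
#include <stdint.h>

/* Isolate the field bits, then right-align them. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Clear the field, then merge in the new, shifted value. */
static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * Hypothetical usage with the definitions above:
 *   slope = field_get(v,
 *       CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK,
 *       CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT);
 */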
+//CM3_CM_DGAM_RAMB_END_CNTL2_B
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_G
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_G
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_R
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_R
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_REGION_0_1
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_2_3
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_4_5
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_6_7
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_8_9
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_10_11
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_12_13
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_14_15
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_CONTROL
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM3_CM_BLNDGAM_LUT_INDEX
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_BLNDGAM_LUT_DATA
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
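/*
 * [Editorial sketch -- not part of this patch.]  Each *_REGION_<2n>_<2n+1>
 * register above packs two gamma-curve regions into one 32-bit word: a 9-bit
 * LUT offset (bits 8:0 and 24:16) and a 3-bit segment count (bits 14:12 and
 * 30:28) per region.  A hypothetical packing helper, assuming exactly that
 * layout as defined by the masks above:
 */
#include <stdint.h>

static inline uint32_t pack_region_pair(uint32_t even_off, uint32_t even_segs,
					uint32_t odd_off, uint32_t odd_segs)
{
	return  (even_off  & 0x1ffu)        |	/* REGION<2n>_LUT_OFFSET     */
	       ((even_segs & 0x7u)   << 12) |	/* REGION<2n>_NUM_SEGMENTS   */
	       ((odd_off   & 0x1ffu) << 16) |	/* REGION<2n+1>_LUT_OFFSET   */
	       ((odd_segs  & 0x7u)   << 28);	/* REGION<2n+1>_NUM_SEGMENTS */
}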
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_HDR_MULT_COEF
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM3_CM_MEM_PWR_CTRL
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM3_CM_MEM_PWR_STATUS
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM3_CM_DEALPHA
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM3_CM_COEF_FORMAT
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM3_CM_SHAPER_CONTROL
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM3_CM_SHAPER_OFFSET_R
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_G
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_B
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_SCALE_R
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM3_CM_SHAPER_SCALE_G_B
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM3_CM_SHAPER_LUT_INDEX
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM3_CM_SHAPER_LUT_DATA
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM3_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_SHAPER_RAMA_START_CNTL_B
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_G
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_R
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_B
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_G
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_R
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_REGION_0_1
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_2_3
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_4_5
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_6_7
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_8_9
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_10_11
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_12_13
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_14_15
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_16_17
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_18_19
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_20_21
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_22_23
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_24_25
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_26_27
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_28_29
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_30_31
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_32_33
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define
CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_START_CNTL_B +#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL +#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//CM3_CM_SHAPER_RAMB_START_CNTL_G +#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL +#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//CM3_CM_SHAPER_RAMB_START_CNTL_R +#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL +#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//CM3_CM_SHAPER_RAMB_END_CNTL_B +#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL +#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L +//CM3_CM_SHAPER_RAMB_END_CNTL_G +#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL +#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L +//CM3_CM_SHAPER_RAMB_END_CNTL_R +#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL +#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L +//CM3_CM_SHAPER_RAMB_REGION_0_1 +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_2_3 +#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define 
CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_4_5 +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_6_7 +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_8_9 +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_10_11 +#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define 
CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_12_13 +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_14_15 +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_16_17 +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_18_19 +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_20_21 +#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define 
CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_22_23 +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_24_25 +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_26_27 +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_28_29 +#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define 
CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_30_31 +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_SHAPER_RAMB_REGION_32_33 +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L +//CM3_CM_MEM_PWR_CTRL2 +#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8 +#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa +#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc +#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe +#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L +#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L +#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L +#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L +//CM3_CM_MEM_PWR_STATUS2 +#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4 +#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6 +#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L +#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L +//CM3_CM_3DLUT_MODE +#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0 +#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4 +#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L +#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L +//CM3_CM_3DLUT_INDEX +#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0 +#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL +//CM3_CM_3DLUT_DATA +#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0 +#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10 +#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL +#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L +//CM3_CM_3DLUT_DATA_30BIT +#define 
CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2 +#define CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL +//CM3_CM_3DLUT_READ_WRITE_CONTROL +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0 +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4 +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8 +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10 +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L +#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L +//CM3_CM_3DLUT_OUT_NORM_FACTOR +#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0 +#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL +//CM3_CM_3DLUT_OUT_OFFSET_R +#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0 +#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10 +#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL +#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L +//CM3_CM_3DLUT_OUT_OFFSET_G +#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0 +#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10 +#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL +#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L +//CM3_CM_3DLUT_OUT_OFFSET_B +#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0 +#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 +#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL +#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L +//CM3_CM_TEST_DEBUG_INDEX +#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//CM3_CM_TEST_DEBUG_DATA +#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0 +#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL + +// addressBlock: dce_dc_mpc_mpcc0_dispdec +//MPCC0_MPCC_TOP_SEL +#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0 +#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL +//MPCC0_MPCC_BOT_SEL +#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0 +#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL +//MPCC0_MPCC_OPP_ID +#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0 +#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL +//MPCC0_MPCC_CONTROL +#define MPCC0_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0 +#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4 +#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6 +#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7 +#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8 +#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb +#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10 +#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18 +#define MPCC0_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L +#define 
MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L +#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L +#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L +#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L +#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L +#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L +#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L +//MPCC0_MPCC_SM_CONTROL +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0 +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1 +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4 +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5 +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8 +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10 +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18 +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L +#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L +//MPCC0_MPCC_UPDATE_LOCK_SEL +#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0 +#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4 +#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL +#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L +//MPCC0_MPCC_TOP_GAIN +#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0 +#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL +//MPCC0_MPCC_BOT_GAIN_INSIDE +#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0 +#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL +//MPCC0_MPCC_BOT_GAIN_OUTSIDE +#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0 +#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL +//MPCC0_MPCC_BG_R_CR +#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0 +#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL +//MPCC0_MPCC_BG_G_Y +#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0 +#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL +//MPCC0_MPCC_BG_B_CB +#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0 +#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL +//MPCC0_MPCC_MEM_PWR_CTRL +#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0 +#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2 +#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4 +#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L +#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L +#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L +//MPCC0_MPCC_STALL_STATUS +#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0 +#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4 +#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8 +#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc +#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L +#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L +#define 
MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L +#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L +//MPCC0_MPCC_STATUS +#define MPCC0_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0 +#define MPCC0_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1 +#define MPCC0_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2 +#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d +#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e +#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f +#define MPCC0_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L +#define MPCC0_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L +#define MPCC0_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L +#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L +#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L +#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L +// addressBlock: dce_dc_mpc_mpcc1_dispdec +//MPCC1_MPCC_TOP_SEL +#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0 +#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL +//MPCC1_MPCC_BOT_SEL +#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0 +#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL +//MPCC1_MPCC_OPP_ID +#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0 +#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL +//MPCC1_MPCC_CONTROL +#define MPCC1_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0 +#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4 +#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6 +#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7 +#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8 +#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb +#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10 +#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18 +#define MPCC1_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L +#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L +#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L +#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L +#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L +#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L +#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L +#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L +//MPCC1_MPCC_SM_CONTROL +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0 +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1 +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4 +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5 +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8 +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10 +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18 +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L +#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L +//MPCC1_MPCC_UPDATE_LOCK_SEL +#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0 +#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4 +#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL +#define 
MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC1_MPCC_TOP_GAIN
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_INSIDE
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BG_R_CR
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_G_Y
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_B_CB
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC1_MPCC_MEM_PWR_CTRL
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC1_MPCC_STALL_STATUS
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC1_MPCC_STATUS
+#define MPCC1_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC1_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC1_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC1_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+//MPCC2_MPCC_TOP_SEL
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_BOT_SEL
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_OPP_ID
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC2_MPCC_CONTROL
+#define MPCC2_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC2_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC2_MPCC_SM_CONTROL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC2_MPCC_UPDATE_LOCK_SEL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC2_MPCC_TOP_GAIN
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_INSIDE
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BG_R_CR
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_G_Y
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_B_CB
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC2_MPCC_MEM_PWR_CTRL
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC2_MPCC_STALL_STATUS
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC2_MPCC_STATUS
+#define MPCC2_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC2_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC2_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC2_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+//MPCC3_MPCC_TOP_SEL
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_BOT_SEL
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_OPP_ID
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC3_MPCC_CONTROL
+#define MPCC3_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC3_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC3_MPCC_SM_CONTROL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC3_MPCC_UPDATE_LOCK_SEL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4 +#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL +#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L +//MPCC3_MPCC_TOP_GAIN +#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0 +#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL +//MPCC3_MPCC_BOT_GAIN_INSIDE +#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0 +#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL +//MPCC3_MPCC_BOT_GAIN_OUTSIDE +#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0 +#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL +//MPCC3_MPCC_BG_R_CR +#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0 +#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL +//MPCC3_MPCC_BG_G_Y +#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0 +#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL +//MPCC3_MPCC_BG_B_CB +#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0 +#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL +//MPCC3_MPCC_MEM_PWR_CTRL +#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0 +#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2 +#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4 +#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L +#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L +#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L +//MPCC3_MPCC_STALL_STATUS +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0 +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4 +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8 +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L +#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L +//MPCC3_MPCC_STATUS +#define MPCC3_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0 +#define MPCC3_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1 +#define MPCC3_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2 +#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d +#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e +#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f +#define MPCC3_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L +#define MPCC3_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L +#define MPCC3_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L +#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L +#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L +#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L +// addressBlock: dce_dc_mpc_mpcc4_dispdec +//MPCC4_MPCC_TOP_SEL +#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0 +#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL +//MPCC4_MPCC_BOT_SEL +#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0 +#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL +//MPCC4_MPCC_OPP_ID +#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0 +#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL +//MPCC4_MPCC_CONTROL +#define MPCC4_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0 +#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4 +#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6 +#define 
MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7 +#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8 +#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb +#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10 +#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18 +#define MPCC4_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L +#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L +#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L +#define MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L +#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L +#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L +#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L +#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L +//MPCC4_MPCC_SM_CONTROL +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0 +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1 +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4 +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5 +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8 +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10 +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18 +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L +#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L +//MPCC4_MPCC_UPDATE_LOCK_SEL +#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0 +#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4 +#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL +#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L +//MPCC4_MPCC_TOP_GAIN +#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0 +#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL +//MPCC4_MPCC_BOT_GAIN_INSIDE +#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0 +#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL +//MPCC4_MPCC_BOT_GAIN_OUTSIDE +#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0 +#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL +//MPCC4_MPCC_BG_R_CR +#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0 +#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL +//MPCC4_MPCC_BG_G_Y +#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0 +#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL +//MPCC4_MPCC_BG_B_CB +#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0 +#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL +//MPCC4_MPCC_MEM_PWR_CTRL +#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0 +#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2 +#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4 +#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L +#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L +#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L +//MPCC4_MPCC_STALL_STATUS +#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0 +#define 
MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4 +#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8 +#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc +#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L +#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L +#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L +#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L +//MPCC4_MPCC_STATUS +#define MPCC4_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0 +#define MPCC4_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1 +#define MPCC4_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2 +#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d +#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e +#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f +#define MPCC4_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L +#define MPCC4_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L +#define MPCC4_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L +#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L +#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L +#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L +// addressBlock: dce_dc_mpc_mpc_cfg_dispdec +//MPC_CLOCK_CONTROL +#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x1 +#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00000002L +//MPC_SOFT_RESET +#define MPC_SOFT_RESET__MPCC0_SOFT_RESET__SHIFT 0x0 +#define MPC_SOFT_RESET__MPCC1_SOFT_RESET__SHIFT 0x1 +#define MPC_SOFT_RESET__MPCC2_SOFT_RESET__SHIFT 0x2 +#define MPC_SOFT_RESET__MPCC3_SOFT_RESET__SHIFT 0x3 +#define MPC_SOFT_RESET__MPCC4_SOFT_RESET__SHIFT 0x4 +#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET__SHIFT 0xa +#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET__SHIFT 0xb +#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET__SHIFT 0xc +#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET__SHIFT 0xd +#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET__SHIFT 0x14 +#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET__SHIFT 0x15 +#define MPC_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x1f +#define MPC_SOFT_RESET__MPCC0_SOFT_RESET_MASK 0x00000001L +#define MPC_SOFT_RESET__MPCC1_SOFT_RESET_MASK 0x00000002L +#define MPC_SOFT_RESET__MPCC2_SOFT_RESET_MASK 0x00000004L +#define MPC_SOFT_RESET__MPCC3_SOFT_RESET_MASK 0x00000008L +#define MPC_SOFT_RESET__MPCC4_SOFT_RESET_MASK 0x00000010L +#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET_MASK 0x00000400L +#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET_MASK 0x00000800L +#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET_MASK 0x00001000L +#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET_MASK 0x00002000L +#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET_MASK 0x00100000L +#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET_MASK 0x00200000L +#define MPC_SOFT_RESET__MPC_SOFT_RESET_MASK 0x80000000L +//MPC_BYPASS_BG_AR +#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA__SHIFT 0x0 +#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR__SHIFT 0x10 +#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA_MASK 0x0000FFFFL +#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR_MASK 0xFFFF0000L +//MPC_BYPASS_BG_GB +#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y__SHIFT 0x0 +#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB__SHIFT 0x10 +#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y_MASK 0x0000FFFFL +#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB_MASK 0xFFFF0000L +//MPC_STALL_GRACE_WINDOW +#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD__SHIFT 0x0 +#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD_MASK 0x000000FFL +//MPC_HOST_READ_CONTROL +#define 
MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0 +#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL +//ADR_CFG_CUR_VUPDATE_LOCK_SET0 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_CFG_VUPDATE_LOCK_SET0 +#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_VUPDATE_LOCK_SET0 +#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L +//CFG_VUPDATE_LOCK_SET0 +#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//CUR_VUPDATE_LOCK_SET0 +#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_CFG_CUR_VUPDATE_LOCK_SET1 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_CFG_VUPDATE_LOCK_SET1 +#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_VUPDATE_LOCK_SET1 +#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L +//CFG_VUPDATE_LOCK_SET1 +#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//CUR_VUPDATE_LOCK_SET1 +#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_CFG_CUR_VUPDATE_LOCK_SET2 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_CFG_VUPDATE_LOCK_SET2 +#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_VUPDATE_LOCK_SET2 +#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L +//CFG_VUPDATE_LOCK_SET2 +#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//CUR_VUPDATE_LOCK_SET2 +#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_CFG_CUR_VUPDATE_LOCK_SET3 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_CFG_VUPDATE_LOCK_SET3 +#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//ADR_VUPDATE_LOCK_SET3 +#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L +//CFG_VUPDATE_LOCK_SET3 +#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L +//CUR_VUPDATE_LOCK_SET3 +#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET__SHIFT 0x0 +#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L 
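These auto-generated definitions come in __SHIFT/_MASK pairs per bit field and are consumed with the usual read-modify-write idiom: clear the field with the mask, then OR in the shifted value. The sketch below, in plain C, shows one such update for the global-alpha field of MPCC1_MPCC_CONTROL defined above. It is illustration only: mmio_read32()/mmio_write32() and the reg_offset parameter are hypothetical stand-ins, not part of this header; the driver itself accesses these fields through its own register helpers (REG_GET/REG_UPDATE-style wrappers) with offsets from the companion _offset.h header.

#include <stdint.h>

/* Hypothetical MMIO accessors, for illustration only. */
extern uint32_t mmio_read32(uint32_t reg_offset);
extern void mmio_write32(uint32_t reg_offset, uint32_t val);

/* Program MPCC1's 8-bit global alpha (mask 0x00FF0000, shift 0x10)
 * without disturbing the other bit fields of MPCC1_MPCC_CONTROL. */
static void mpcc1_set_global_alpha(uint32_t reg_offset, uint8_t alpha)
{
	uint32_t v = mmio_read32(reg_offset);

	v &= ~(uint32_t)MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK;
	v |= ((uint32_t)alpha << MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT) &
	     MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK;
	mmio_write32(reg_offset, v);
}

The same pattern applies to every register in this file; only the macro prefix (MPCC0_ through MPCC4_, MPC_OUT0_/MPC_OUT1_, and so on) selects the per-instance copy of the field.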
+//MPC_OUT0_MUX
+#define MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR__SHIFT 0x6
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT0__SHIFT 0xb
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT1__SHIFT 0x14
+#define MPC_OUT0_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR_MASK 0x00000040L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT0_MASK 0x000FF800L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT1_MASK 0xFFF00000L
+//MPC_OUT0_DENORM_CONTROL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT0_DENORM_CLAMP_G_Y
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT0_DENORM_CLAMP_B_CB
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT1_MUX
+#define MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR__SHIFT 0x6
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT0__SHIFT 0xb
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT1__SHIFT 0x14
+#define MPC_OUT1_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR_MASK 0x00000040L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT0_MASK 0x000FF800L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT1_MASK 0xFFF00000L
+//MPC_OUT1_DENORM_CONTROL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT1_DENORM_CLAMP_G_Y
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT1_DENORM_CLAMP_B_CB
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+//MPCC_OGAM0_MPCC_OGAM_MODE
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM0_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+//MPCC_OGAM1_MPCC_OGAM_MODE
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM1_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R +#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define 
MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L + + +// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec +//MPCC_OGAM2_MPCC_OGAM_MODE +#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L +//MPCC_OGAM2_MPCC_OGAM_LUT_INDEX +#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL +//MPCC_OGAM2_MPCC_OGAM_LUT_DATA +#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL +//MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL +#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3 +#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4 +#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L +#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L +//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L 
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B +#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G +#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R +#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define 
MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 +#define 
MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 +#define 
MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 +#define 
MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 
0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B +#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G +#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R +#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL +//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL 
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define 
MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L + + +// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec +//MPCC_OGAM3_MPCC_OGAM_MODE +#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L +//MPCC_OGAM3_MPCC_OGAM_LUT_INDEX +#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL +//MPCC_OGAM3_MPCC_OGAM_LUT_DATA +#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL +//MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL +#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3 +#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4 +#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L +#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L +//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L 
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B +#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G +#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R +#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define 
MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 +#define 
MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 +#define 
MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 +#define 
MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 
0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B +#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G +#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R +#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL +//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL 
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define 
MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L + + +// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec +//MPCC_OGAM4_MPCC_OGAM_MODE +#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L +//MPCC_OGAM4_MPCC_OGAM_LUT_INDEX +#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL +//MPCC_OGAM4_MPCC_OGAM_LUT_DATA +#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL +//MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL +#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3 +#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4 +#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L +#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L +#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L +//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L 
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B +#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G +#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R +#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define 
MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11 +#define 
MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19 +#define 
MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27 +#define 
MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 
0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B +#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G +#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R +#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL +//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL 
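The REGION_* registers above all follow one packing convention: each 32-bit register carries two (LUT offset, segment count) pairs, placed with the `__SHIFT` value and bounded by the `_MASK` value. A minimal sketch of composing one such value, using only the macros defined above; the helper name is hypothetical and `u32` assumes kernel types:

#include <linux/types.h>

/* Pack two (LUT offset, segment count) pairs into one region register:
 * offsets occupy bits 8:0 and 24:16, segment counts bits 14:12 and 30:28. */
static inline u32 mpcc_ogam4_rama_region_32_33(u32 offset32, u32 segs32,
					       u32 offset33, u32 segs33)
{
	u32 val = 0;

	val |= (offset32 << MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT)
		& MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK;
	val |= (segs32 << MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT)
		& MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK;
	val |= (offset33 << MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT)
		& MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK;
	val |= (segs33 << MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT)
		& MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK;
	return val;
}

Masking after the shift keeps an out-of-range argument from corrupting the neighbouring field, which is why the generated headers always emit the SHIFT and MASK as a pair.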
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL +#define 
MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L +#define 
MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L +//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10 +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L +#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L + + +// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec +//MPC_OUT_CSC_COEF_FORMAT +#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT__SHIFT 0x0 +#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT__SHIFT 0x1 +#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT_MASK 0x00000001L +#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT_MASK 0x00000002L +//MPC_OUT0_CSC_MODE +#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0 +#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L +//MPC_OUT0_CSC_C11_C12_A +#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0 +#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10 +#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C13_C14_A +#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0 +#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10 +#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C21_C22_A +#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0 +#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10 +#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C23_C24_A +#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0 +#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10 +#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C31_C32_A +#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0 +#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10 +#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C33_C34_A +#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0 +#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10 +#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C11_C12_B +#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0 +#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10 +#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C13_C14_B +#define 
MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0 +#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10 +#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C21_C22_B +#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0 +#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10 +#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C23_C24_B +#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0 +#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10 +#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C31_C32_B +#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0 +#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10 +#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L +//MPC_OUT0_CSC_C33_C34_B +#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0 +#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10 +#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL +#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L +//MPC_OUT1_CSC_MODE +#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0 +#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L +//MPC_OUT1_CSC_C11_C12_A +#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0 +#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10 +#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C13_C14_A +#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0 +#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10 +#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C21_C22_A +#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0 +#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10 +#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C23_C24_A +#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0 +#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10 +#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C31_C32_A +#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0 +#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10 +#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C33_C34_A +#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0 +#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10 +#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C11_C12_B +#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0 +#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10 +#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C13_C14_B +#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0 +#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10 +#define 
MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C21_C22_B +#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0 +#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10 +#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C23_C24_B +#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0 +#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10 +#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C31_C32_B +#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0 +#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10 +#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L +//MPC_OUT1_CSC_C33_C34_B +#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0 +#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10 +#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL +#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L +//MPC_OCSC_TEST_DEBUG_INDEX +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0 +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L + + +// addressBlock: dce_dc_opp_fmt0_dispdec +//FMT0_FMT_CLAMP_COMPONENT_R +#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0 +#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10 +#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL +#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L +//FMT0_FMT_CLAMP_COMPONENT_G +#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0 +#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10 +#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL +#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L +//FMT0_FMT_CLAMP_COMPONENT_B +#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0 +#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10 +#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL +#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L +//FMT0_FMT_DYNAMIC_EXP_CNTL +#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0 +#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4 +#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L +#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L +//FMT0_FMT_CONTROL +#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0 +#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8 +#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc +#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10 +#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12 +#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14 +#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15 +#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18 +#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L +#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L +#define 
FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L +#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L +#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L +#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L +#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L +#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L +//FMT0_FMT_BIT_DEPTH_CONTROL +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19 +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L +#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L +//FMT0_FMT_DITHER_RAND_R_SEED +#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0 +#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10 +#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL +#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L +//FMT0_FMT_DITHER_RAND_G_SEED +#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0 +#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10 +#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL +#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L +//FMT0_FMT_DITHER_RAND_B_SEED 
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0 +#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10 +#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL +#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L +//FMT0_FMT_CLAMP_CNTL +#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0 +#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10 +#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L +#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L +//FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL +#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0 +#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL +//FMT0_FMT_MAP420_MEMORY_CONTROL +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0 +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4 +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8 +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L +#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L +//FMT0_FMT_422_CONTROL +#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0 +#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L + + +// addressBlock: dce_dc_opp_dpg0_dispdec +//DPG0_DPG_CONTROL +#define DPG0_DPG_CONTROL__DPG_EN__SHIFT 0x0 +#define DPG0_DPG_CONTROL__DPG_MODE__SHIFT 0x4 +#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8 +#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc +#define DPG0_DPG_CONTROL__DPG_VRES__SHIFT 0x10 +#define DPG0_DPG_CONTROL__DPG_HRES__SHIFT 0x14 +#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18 +#define DPG0_DPG_CONTROL__DPG_EN_MASK 0x00000001L +#define DPG0_DPG_CONTROL__DPG_MODE_MASK 0x00000070L +#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L +#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L +#define DPG0_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L +#define DPG0_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L +#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L +//DPG0_DPG_RAMP_CONTROL +#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0 +#define DPG0_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18 +#define DPG0_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c +#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL +#define DPG0_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L +#define DPG0_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L +//DPG0_DPG_DIMENSIONS +#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0 +#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10 +#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL +#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L +//DPG0_DPG_COLOUR_R_CR +#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0 +#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10 +#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL +#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L +//DPG0_DPG_COLOUR_G_Y +#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0 +#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10 +#define 
DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL +#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L +//DPG0_DPG_COLOUR_B_CB +#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0 +#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10 +#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL +#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L +//DPG0_DPG_OFFSET_SEGMENT +#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0 +#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10 +#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL +#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L +//DPG0_DPG_STATUS +#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0 +#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L + + +// addressBlock: dce_dc_opp_oppbuf0_dispdec +//OPPBUF0_OPPBUF_CONTROL +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0 +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10 +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14 +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18 +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L +#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L +//OPPBUF0_OPPBUF_3D_PARAMETERS_0 +#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0 +#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa +#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14 +#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL +#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L +#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L +//OPPBUF0_OPPBUF_3D_PARAMETERS_1 +#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0 +#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10 +#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL +#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L + + +// addressBlock: dce_dc_opp_opp_pipe0_dispdec +//OPP_PIPE0_OPP_PIPE_CONTROL +#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0 +#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1 +#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4 +#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L +#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L +#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L + + +// addressBlock: dce_dc_opp_fmt1_dispdec +//FMT1_FMT_CLAMP_COMPONENT_R +#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0 +#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10 +#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL +#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L +//FMT1_FMT_CLAMP_COMPONENT_G +#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0 +#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10 +#define 
FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL +#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L +//FMT1_FMT_CLAMP_COMPONENT_B +#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0 +#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10 +#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL +#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L +//FMT1_FMT_DYNAMIC_EXP_CNTL +#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0 +#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4 +#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L +#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L +//FMT1_FMT_CONTROL +#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0 +#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8 +#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc +#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10 +#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12 +#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14 +#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15 +#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18 +#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L +#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L +#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L +#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L +#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L +#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L +#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L +#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L +//FMT1_FMT_BIT_DEPTH_CONTROL +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19 +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 
0x00001800L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L +#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L +//FMT1_FMT_DITHER_RAND_R_SEED +#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0 +#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10 +#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL +#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L +//FMT1_FMT_DITHER_RAND_G_SEED +#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0 +#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10 +#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL +#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L +//FMT1_FMT_DITHER_RAND_B_SEED +#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0 +#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10 +#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL +#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L +//FMT1_FMT_CLAMP_CNTL +#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0 +#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10 +#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L +#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L +//FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL +#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0 +#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL +//FMT1_FMT_MAP420_MEMORY_CONTROL +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0 +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4 +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8 +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L +#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L +//FMT1_FMT_422_CONTROL +#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0 +#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L + + +// addressBlock: dce_dc_opp_dpg1_dispdec +//DPG1_DPG_CONTROL +#define DPG1_DPG_CONTROL__DPG_EN__SHIFT 0x0 +#define DPG1_DPG_CONTROL__DPG_MODE__SHIFT 0x4 +#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8 +#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc +#define DPG1_DPG_CONTROL__DPG_VRES__SHIFT 0x10 +#define DPG1_DPG_CONTROL__DPG_HRES__SHIFT 0x14 +#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18 +#define 
DPG1_DPG_CONTROL__DPG_EN_MASK 0x00000001L +#define DPG1_DPG_CONTROL__DPG_MODE_MASK 0x00000070L +#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L +#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L +#define DPG1_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L +#define DPG1_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L +#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L +//DPG1_DPG_RAMP_CONTROL +#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0 +#define DPG1_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18 +#define DPG1_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c +#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL +#define DPG1_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L +#define DPG1_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L +//DPG1_DPG_DIMENSIONS +#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0 +#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10 +#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL +#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L +//DPG1_DPG_COLOUR_R_CR +#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0 +#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10 +#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL +#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L +//DPG1_DPG_COLOUR_G_Y +#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0 +#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10 +#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL +#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L +//DPG1_DPG_COLOUR_B_CB +#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0 +#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10 +#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL +#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L +//DPG1_DPG_OFFSET_SEGMENT +#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0 +#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10 +#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL +#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L +//DPG1_DPG_STATUS +#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0 +#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L + + +// addressBlock: dce_dc_opp_oppbuf1_dispdec +//OPPBUF1_OPPBUF_CONTROL +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0 +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10 +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14 +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18 +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L +#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L +//OPPBUF1_OPPBUF_3D_PARAMETERS_0 +#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0 +#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa +#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14 +#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL +#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L +#define 
OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L +//OPPBUF1_OPPBUF_3D_PARAMETERS_1 +#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0 +#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10 +#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL +#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L + + +// addressBlock: dce_dc_opp_opp_pipe1_dispdec +//OPP_PIPE1_OPP_PIPE_CONTROL +#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0 +#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1 +#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4 +#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L +#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L +#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L + + + + +// addressBlock: dce_dc_opp_opp_top_dispdec +//OPP_TOP_CLK_CONTROL +#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS__SHIFT 0x0 +#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS__SHIFT 0x4 +#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON__SHIFT 0xc +#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON__SHIFT 0xd +#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS_MASK 0x00000001L +#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS_MASK 0x00000010L +#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON_MASK 0x00001000L +#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON_MASK 0x00002000L + + +// addressBlock: dce_dc_optc_odm0_dispdec +//ODM0_OPTC_INPUT_GLOBAL_CONTROL +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0 +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8 +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9 +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L +#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L +//ODM0_OPTC_DATA_SOURCE_SELECT +#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8 +#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L +//ODM0_OPTC_DATA_FORMAT_CONTROL +#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0 +#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4 +#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L +#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L +//ODM0_OPTC_BYTES_PER_PIXEL +#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0 +#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL 
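The OPTC_INPUT_GLOBAL_CONTROL bits above mix latched status (OCCURRED_STATUS) with a clear strobe (UNDERFLOW_CLEAR). A minimal read-modify-write sketch, under stated assumptions: `reg_read()`/`reg_write()` are hypothetical MMIO accessors, `mmODM0_OPTC_INPUT_GLOBAL_CONTROL` stands in for the register offset from the companion offset header (this file defines only the bitfield layout), and the clear bit is assumed to act as write-one-to-clear:

#include <linux/types.h>

extern u32 reg_read(u32 reg);			/* hypothetical MMIO accessors */
extern void reg_write(u32 reg, u32 val);
extern const u32 mmODM0_OPTC_INPUT_GLOBAL_CONTROL;	/* assumed offset symbol */

/* Report whether the ODM0 input path underflowed; if so, clear the
 * latched status so the next occurrence is observable. */
static bool odm0_clear_underflow(void)
{
	u32 v = reg_read(mmODM0_OPTC_INPUT_GLOBAL_CONTROL);

	if (!(v & ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK))
		return false;

	v |= ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK;
	reg_write(mmODM0_OPTC_INPUT_GLOBAL_CONTROL, v);
	return true;
}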
+//ODM0_OPTC_WIDTH_CONTROL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM0_OPTC_INPUT_CLOCK_CONTROL
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM0_OPTC_INPUT_SPARE_REGISTER
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+//ODM1_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM1_OPTC_DATA_SOURCE_SELECT
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+//ODM1_OPTC_DATA_FORMAT_CONTROL
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM1_OPTC_BYTES_PER_PIXEL
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM1_OPTC_WIDTH_CONTROL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM1_OPTC_INPUT_CLOCK_CONTROL
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM1_OPTC_INPUT_SPARE_REGISTER
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+//OTG0_OTG_H_TOTAL
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_H_BLANK_START_END
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A_CNTL
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG0_OTG_H_TIMING_CNTL
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG0_OTG_V_TOTAL
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MIN
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MAX
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MID
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_CONTROL
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_V_TOTAL_INT_STATUS
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+//OTG0_OTG_VSYNC_NOM_INT_STATUS
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG0_OTG_V_BLANK_START_END
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A_CNTL
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG0_OTG_TRIGA_CNTL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
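These auto-generated headers pair a ...__SHIFT with a ..._MASK constant for every register field. As a minimal sketch of the usual access pattern (not part of this patch; field_get()/field_set() are hypothetical helpers, shown here against the OTG0_OTG_TRIGA_CNTL definitions above):

#include <stdint.h>

/* Extract a field from a 32-bit register value via its MASK/SHIFT pair. */
static inline uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a field in a 32-bit register value, leaving all other bits intact. */
static inline uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* e.g. read the trigger delay out of a latched OTG0_OTG_TRIGA_CNTL value:
 *   delay = field_get(cntl, OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK,
 *                     OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT);
 */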
+//OTG0_OTG_TRIGA_MANUAL_TRIG
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_TRIGB_CNTL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGB_MANUAL_TRIG
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG0_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG0_OTG_CONTROL
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG0_OTG_BLANK_CONTROL
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG0_OTG_INTERLACE_CONTROL
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG0_OTG_INTERLACE_STATUS
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG0_OTG_PIXEL_DATA_READBACK0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG0_OTG_PIXEL_DATA_READBACK1
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG0_OTG_STATUS
+#define OTG0_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG0_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG0_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG0_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG0_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG0_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG0_OTG_STATUS_POSITION
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_NOM_VERT_POSITION
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG0_OTG_STATUS_FRAME_COUNT
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_STATUS_VF_COUNT
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_STATUS_HV_COUNT
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_COUNT_CONTROL
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG0_OTG_COUNT_RESET
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG0_OTG_VERT_SYNC_CONTROL
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG0_OTG_STEREO_STATUS
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG0_OTG_STEREO_CONTROL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG0_OTG_SNAPSHOT_STATUS
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG0_OTG_SNAPSHOT_CONTROL
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG0_OTG_SNAPSHOT_POSITION
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_SNAPSHOT_FRAME
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_INTERRUPT_CONTROL
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG0_OTG_UPDATE_LOCK
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG0_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG0_OTG_MASTER_EN
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG0_OTG_BLANK_DATA_COLOR
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLANK_DATA_COLOR_EXT
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_BLACK_COLOR
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLACK_COLOR_EXT
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_CRC_CNTL
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG0_OTG_CRC_CNTL2
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_DATA_RG
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_DATA_B
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_DATA_RG
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_DATA_B
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_STATIC_SCREEN_CONTROL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG0_OTG_3D_STRUCTURE_CONTROL
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG0_OTG_GSL_VSYNC_GAP
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG0_OTG_MASTER_UPDATE_MODE
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG0_OTG_CLOCK_CONTROL
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG0_OTG_VSTARTUP_PARAM
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG0_OTG_VUPDATE_PARAM
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG0_OTG_VREADY_PARAM
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG0_OTG_GLOBAL_SYNC_STATUS
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG0_OTG_MASTER_UPDATE_LOCK
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG0_OTG_GSL_CONTROL
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG0_OTG_GSL_WINDOW_X
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_GSL_WINDOW_Y
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG0_OTG_VUPDATE_KEEPOUT
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG0_OTG_GLOBAL_CONTROL1
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL2
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL3
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG0_OTG_TRIG_MANUAL_CONTROL
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG0_OTG_RANGE_TIMING_INT_STATUS
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG0_OTG_DRR_CONTROL
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG0_OTG_REQUEST_CONTROL
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG0_OTG_DSC_START_POSITION
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG0_OTG_SPARE_REGISTER
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+//OTG1_OTG_H_TOTAL
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_H_BLANK_START_END
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A_CNTL
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG1_OTG_H_TIMING_CNTL
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG1_OTG_V_TOTAL
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MIN
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MAX
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MID
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_CONTROL
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_V_TOTAL_INT_STATUS
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+//OTG1_OTG_VSYNC_NOM_INT_STATUS
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG1_OTG_V_BLANK_START_END
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A_CNTL
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG1_OTG_TRIGA_CNTL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGA_MANUAL_TRIG
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_TRIGB_CNTL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGB_MANUAL_TRIG
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG1_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG1_OTG_CONTROL
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_CONTROL__OTG_SYNC_RESET_SEL__SHIFT 0x4
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc +#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd +#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe +#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10 +#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18 +#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e +#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f +#define OTG1_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L +#define OTG1_OTG_CONTROL__OTG_SYNC_RESET_SEL_MASK 0x00000010L +#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L +#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L +#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L +#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L +#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L +#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L +#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L +#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L +//OTG1_OTG_BLANK_CONTROL +#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0 +#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8 +#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10 +#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L +#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L +#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L +//OTG1_OTG_INTERLACE_CONTROL +#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0 +#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10 +#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L +#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L +//OTG1_OTG_INTERLACE_STATUS +#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0 +#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1 +#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L +#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L +//OTG1_OTG_PIXEL_DATA_READBACK0 +#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0 +#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10 +#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL +#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L +//OTG1_OTG_PIXEL_DATA_READBACK1 +#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0 +#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL +//OTG1_OTG_STATUS +#define OTG1_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0 +#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1 +#define OTG1_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2 +#define OTG1_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3 +#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5 +#define OTG1_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10 +#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11 +#define OTG1_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12 +#define OTG1_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L +#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L +#define OTG1_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L +#define OTG1_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L +#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L +#define OTG1_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L 
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L +#define OTG1_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L +//OTG1_OTG_STATUS_POSITION +#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0 +#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10 +#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL +#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L +//OTG1_OTG_NOM_VERT_POSITION +#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0 +#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL +//OTG1_OTG_STATUS_FRAME_COUNT +#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0 +#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL +//OTG1_OTG_STATUS_VF_COUNT +#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0 +#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL +//OTG1_OTG_STATUS_HV_COUNT +#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0 +#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL +//OTG1_OTG_COUNT_CONTROL +#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0 +#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1 +#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L +#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL +//OTG1_OTG_COUNT_RESET +#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0 +#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L +//OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE +#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0 +#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L +//OTG1_OTG_VERT_SYNC_CONTROL +#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0 +#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8 +#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10 +#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L +#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L +#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L +//OTG1_OTG_STEREO_STATUS +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0 +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8 +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10 +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14 +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18 +#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e +#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L +#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L +#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L +#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L +//OTG1_OTG_STEREO_CONTROL +#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0 +#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf +#define 
OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11 +#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12 +#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13 +#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14 +#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18 +#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL +#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L +#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L +#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L +#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L +#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L +#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L +//OTG1_OTG_SNAPSHOT_STATUS +#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0 +#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1 +#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2 +#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L +#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L +#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L +//OTG1_OTG_SNAPSHOT_CONTROL +#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0 +#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L +//OTG1_OTG_SNAPSHOT_POSITION +#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0 +#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10 +#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL +#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L +//OTG1_OTG_SNAPSHOT_FRAME +#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0 +#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL +//OTG1_OTG_INTERRUPT_CONTROL +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19 +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L +#define 
OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L +#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L +//OTG1_OTG_UPDATE_LOCK +#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0 +#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L +//OTG1_OTG_DOUBLE_BUFFER_CONTROL +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18 +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L +#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L +//OTG1_OTG_MASTER_EN +#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0 +#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L +//OTG1_OTG_BLANK_DATA_COLOR +#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0 +#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa +#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14 +#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL +#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L +#define 
OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L +//OTG1_OTG_BLANK_DATA_COLOR_EXT +#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0 +#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8 +#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10 +#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL +#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L +#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L +//OTG1_OTG_BLACK_COLOR +#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0 +#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa +#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14 +#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL +#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L +#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L +//OTG1_OTG_BLACK_COLOR_EXT +#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0 +#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8 +#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10 +#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL +#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L +#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L +//OTG1_OTG_VERTICAL_INTERRUPT0_POSITION +#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0 +#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10 +#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL +#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L +//OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4 +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8 +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10 +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14 +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18 +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L +#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L +//OTG1_OTG_VERTICAL_INTERRUPT1_POSITION +#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0 +#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL +//OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8 +#define 
OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10 +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14 +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18 +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L +#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L +//OTG1_OTG_VERTICAL_INTERRUPT2_POSITION +#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0 +#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL +//OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8 +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10 +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14 +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18 +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L +#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L +//OTG1_OTG_CRC_CNTL +#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0 +#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1 +#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2 +#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3 +#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4 +#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5 +#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8 +#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc +#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13 +#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14 +#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18 +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f +#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L +#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L +#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L +#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L +#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L +#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L +#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L +#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L +#define 
OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L +#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L +#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L +#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L +//OTG1_OTG_CRC_CNTL2 +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0 +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1 +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4 +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8 +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L +#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L +//OTG1_OTG_CRC0_WINDOWA_X_CONTROL +#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0 +#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10 +#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL +#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0 +#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10 +#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC0_WINDOWB_X_CONTROL +#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0 +#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10 +#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL +#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0 +#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10 +#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC0_DATA_RG +#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0 +#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10 +#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL +#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L +//OTG1_OTG_CRC0_DATA_B +#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0 +#define OTG1_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10 +#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL +#define OTG1_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L +//OTG1_OTG_CRC1_WINDOWA_X_CONTROL +#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0 +#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10 +#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL +#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0 +#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10 +#define 
OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC1_WINDOWB_X_CONTROL +#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0 +#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10 +#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL +#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0 +#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10 +#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL +#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L +//OTG1_OTG_CRC1_DATA_RG +#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0 +#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10 +#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL +#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L +//OTG1_OTG_CRC1_DATA_B +#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0 +#define OTG1_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10 +#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL +#define OTG1_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L +//OTG1_OTG_CRC_SIG_RED_GREEN_MASK +#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0 +#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10 +#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL +#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L +//OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK +#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0 +#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10 +#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL +#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L +//OTG1_OTG_STATIC_SCREEN_CONTROL +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0 +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10 +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18 +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19 +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L +#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L 
+//OTG1_OTG_3D_STRUCTURE_CONTROL +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0 +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8 +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10 +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11 +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12 +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L +#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L +//OTG1_OTG_GSL_VSYNC_GAP +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18 +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L +#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L +//OTG1_OTG_MASTER_UPDATE_MODE +#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0 +#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L +//OTG1_OTG_CLOCK_CONTROL +#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0 +#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1 +#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4 +#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8 +#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10 +#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L +#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L +#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L +#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L +#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L +//OTG1_OTG_VSTARTUP_PARAM +#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0 +#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL +//OTG1_OTG_VUPDATE_PARAM +#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0 +#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10 +#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL +#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L +//OTG1_OTG_VREADY_PARAM +#define 
OTG1_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0 +#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL +//OTG1_OTG_GLOBAL_SYNC_STATUS +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19 +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 
0x00200000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L +#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L +//OTG1_OTG_MASTER_UPDATE_LOCK +#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0 +#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8 +#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L +#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L +//OTG1_OTG_GSL_CONTROL +#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0 +#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1 +#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2 +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3 +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4 +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8 +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10 +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c +#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f +#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L +#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L +#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L +#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L +#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L +//OTG1_OTG_GSL_WINDOW_X +#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0 +#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10 +#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL +#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L +//OTG1_OTG_GSL_WINDOW_Y +#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0 +#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10 +#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL +#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L +//OTG1_OTG_VUPDATE_KEEPOUT +#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0 +#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10 +#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f +#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL +#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L +#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L +//OTG1_OTG_GLOBAL_CONTROL0 +#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0 +#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8 +#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19 +#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL +#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L +#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L +//OTG1_OTG_GLOBAL_CONTROL1 +#define 
OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0 +#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10 +#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f +#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL +#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L +#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L +//OTG1_OTG_GLOBAL_CONTROL2 +#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0 +#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa +#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10 +#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f +#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL +#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L +#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L +#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L +//OTG1_OTG_GLOBAL_CONTROL3 +#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0 +#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4 +#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8 +#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L +#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L +#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L +//OTG1_OTG_TRIG_MANUAL_CONTROL +#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0 +#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L +//OTG1_OTG_RANGE_TIMING_INT_STATUS +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0 +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4 +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8 +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10 +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L +#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L +//OTG1_OTG_DRR_CONTROL +#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0 +#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10 +#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L +#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L +//OTG1_OTG_REQUEST_CONTROL +#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0 +#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L +//OTG1_OTG_DSC_START_POSITION +#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0 +#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10 +#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL +#define 
OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L + + +// addressBlock: dce_dc_optc_optc_misc_dispdec +//DWB_SOURCE_SELECT +#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT__SHIFT 0x0 +#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT__SHIFT 0x3 +#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT__SHIFT 0x6 +#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT_MASK 0x00000007L +#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT_MASK 0x00000038L +#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT_MASK 0x000001C0L +//GSL_SOURCE_SELECT +#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL__SHIFT 0x0 +#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL__SHIFT 0x4 +#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL__SHIFT 0x8 +#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL__SHIFT 0x10 +#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL_MASK 0x00000007L +#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL_MASK 0x00000070L +#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL_MASK 0x00000700L +#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL_MASK 0x00070000L +//OPTC_CLOCK_CONTROL +#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS__SHIFT 0x0 +#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON__SHIFT 0x1 +#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS_MASK 0x00000001L +#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON_MASK 0x00000002L + +// addressBlock: dce_dc_dio_dout_i2c_dispdec +//DC_I2C_CONTROL +#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0 +#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1 +#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2 +#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3 +#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8 +#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14 +#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L +#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L +#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L +#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L +#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L +#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L +//DC_I2C_ARBITRATION +#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0 +#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2 +#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4 +#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8 +#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc +#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14 +#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15 +#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18 +#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19 +#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L +#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000CL +#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L +#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L +#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L +#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L +#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L +#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L +#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L +//DC_I2C_INTERRUPT_CONTROL +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0 +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1 +#define 
DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2 +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4 +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5 +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6 +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8 +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9 +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L +#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L +//DC_I2C_SW_STATUS +#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0 +#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2 +#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4 +#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5 +#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6 +#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7 +#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8 +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf +#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12 +#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L +#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L +#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L +#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L +#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L +#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L +#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L +#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L +#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L +//DC_I2C_DDC1_HW_STATUS +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0 +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3 +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10 +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11 +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14 +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18 +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L +#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L 
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L +//DC_I2C_DDC2_HW_STATUS +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0 +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3 +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10 +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11 +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14 +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18 +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L +#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L +//DC_I2C_DDC1_SPEED +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0 +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4 +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8 +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10 +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x00000300L +#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xFFFF0000L +//DC_I2C_DDC1_SETUP +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH__SHIFT 0x2 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18 +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH_MASK 0x00000004L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000FF00L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L +#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xFF000000L +//DC_I2C_DDC2_SPEED +#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0 +#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4 +#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8 +#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10 +#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L +#define 
DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L +#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x00000300L +#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xFFFF0000L +//DC_I2C_DDC2_SETUP +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH__SHIFT 0x2 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18 +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH_MASK 0x00000004L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000FF00L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L +#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xFF000000L +//DC_I2C_TRANSACTION0 +#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0 +#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8 +#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc +#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd +#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10 +#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L +#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L +#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L +#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L +#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x03FF0000L +//DC_I2C_TRANSACTION1 +#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0 +#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8 +#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc +#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd +#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10 +#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L +#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L +#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L +#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L +#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x03FF0000L +//DC_I2C_TRANSACTION2 +#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0 +#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8 +#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc +#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd +#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10 +#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L +#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L +#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L +#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L +#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x03FF0000L +//DC_I2C_TRANSACTION3 +#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0 +#define 
DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8 +#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc +#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd +#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10 +#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L +#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L +#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L +#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L +#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x03FF0000L +//DC_I2C_DATA +#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0 +#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8 +#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10 +#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f +#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L +#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000FF00L +#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x03FF0000L +#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L + +//DIO_SCRATCH0 +#define DIO_SCRATCH0__DIO_SCRATCH0__SHIFT 0x0 +#define DIO_SCRATCH0__DIO_SCRATCH0_MASK 0xFFFFFFFFL +//DIO_SCRATCH1 +#define DIO_SCRATCH1__DIO_SCRATCH1__SHIFT 0x0 +#define DIO_SCRATCH1__DIO_SCRATCH1_MASK 0xFFFFFFFFL +//DIO_SCRATCH2 +#define DIO_SCRATCH2__DIO_SCRATCH2__SHIFT 0x0 +#define DIO_SCRATCH2__DIO_SCRATCH2_MASK 0xFFFFFFFFL +//DIO_SCRATCH3 +#define DIO_SCRATCH3__DIO_SCRATCH3__SHIFT 0x0 +#define DIO_SCRATCH3__DIO_SCRATCH3_MASK 0xFFFFFFFFL +//DIO_SCRATCH4 +#define DIO_SCRATCH4__DIO_SCRATCH4__SHIFT 0x0 +#define DIO_SCRATCH4__DIO_SCRATCH4_MASK 0xFFFFFFFFL +//DIO_SCRATCH5 +#define DIO_SCRATCH5__DIO_SCRATCH5__SHIFT 0x0 +#define DIO_SCRATCH5__DIO_SCRATCH5_MASK 0xFFFFFFFFL +//DIO_SCRATCH6 +#define DIO_SCRATCH6__DIO_SCRATCH6__SHIFT 0x0 +#define DIO_SCRATCH6__DIO_SCRATCH6_MASK 0xFFFFFFFFL +//DIO_SCRATCH7 +#define DIO_SCRATCH7__DIO_SCRATCH7__SHIFT 0x0 +#define DIO_SCRATCH7__DIO_SCRATCH7_MASK 0xFFFFFFFFL +//DIO_MEM_PWR_STATUS +#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0 +#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3 +#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4 +#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5 +#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6 +#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7 +#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8 +#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9 +#define DIO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE__SHIFT 0xa +#define DIO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE__SHIFT 0xc +#define DIO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE__SHIFT 0xe +#define DIO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE__SHIFT 0x10 +#define DIO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE__SHIFT 0x12 +#define DIO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE__SHIFT 0x14 +#define DIO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE__SHIFT 0x16 +#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x00000001L +#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x00000008L +#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x00000010L +#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x00000020L +#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x00000040L +#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x00000080L +#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x00000100L +#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x00000200L +#define DIO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE_MASK 0x00000C00L +#define DIO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE_MASK 0x00003000L +#define DIO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE_MASK 0x0000C000L +#define DIO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE_MASK 
0x00030000L +#define DIO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE_MASK 0x000C0000L +#define DIO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE_MASK 0x00300000L +#define DIO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE_MASK 0x00C00000L +//DIO_MEM_PWR_CTRL +#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0 +#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1 +#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4 +#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5 +#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6 +#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7 +#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8 +#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9 +#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa +#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE__SHIFT 0xb +#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS__SHIFT 0xd +#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE__SHIFT 0xe +#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS__SHIFT 0x10 +#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE__SHIFT 0x11 +#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS__SHIFT 0x13 +#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE__SHIFT 0x14 +#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS__SHIFT 0x16 +#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE__SHIFT 0x17 +#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS__SHIFT 0x19 +#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE__SHIFT 0x1a +#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS__SHIFT 0x1c +#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE__SHIFT 0x1d +#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS__SHIFT 0x1f +#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000001L +#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x00000002L +#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x00000010L +#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x00000020L +#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x00000040L +#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x00000080L +#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x00000100L +#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x00000200L +#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x00000400L +#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE_MASK 0x00001800L +#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS_MASK 0x00002000L +#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE_MASK 0x0000C000L +#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS_MASK 0x00010000L +#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE_MASK 0x00060000L +#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS_MASK 0x00080000L +#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE_MASK 0x00300000L +#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS_MASK 0x00400000L +#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE_MASK 0x01800000L +#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS_MASK 0x02000000L +#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE_MASK 0x0C000000L +#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS_MASK 0x10000000L +#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE_MASK 0x60000000L +#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS_MASK 0x80000000L +//DIO_MEM_PWR_CTRL2 +#define DIO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL__SHIFT 0x0 +#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_DIS__SHIFT 0x4 +#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_FORCE__SHIFT 0x5 +#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_DIS__SHIFT 0x6 +#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_FORCE__SHIFT 0x7 +#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_DIS__SHIFT 0x8 +#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_FORCE__SHIFT 0x9 +#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_DIS__SHIFT 0xa +#define 
DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_FORCE__SHIFT 0xb +#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_DIS__SHIFT 0xc +#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_FORCE__SHIFT 0xd +#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_DIS__SHIFT 0xe +#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_FORCE__SHIFT 0xf +#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE__SHIFT 0x18 +#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE__SHIFT 0x19 +#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE__SHIFT 0x1a +#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE__SHIFT 0x1b +#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE__SHIFT 0x1c +#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE__SHIFT 0x1d +#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE__SHIFT 0x1e +#define DIO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL_MASK 0x00000003L +#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_DIS_MASK 0x00000010L +#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_FORCE_MASK 0x00000020L +#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_DIS_MASK 0x00000040L +#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_FORCE_MASK 0x00000080L +#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_DIS_MASK 0x00000100L +#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_FORCE_MASK 0x00000200L +#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_DIS_MASK 0x00000400L +#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_FORCE_MASK 0x00000800L +#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_DIS_MASK 0x00001000L +#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_FORCE_MASK 0x00002000L +#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_DIS_MASK 0x00004000L +#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_FORCE_MASK 0x00008000L +#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE_MASK 0x01000000L +#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE_MASK 0x02000000L +#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE_MASK 0x04000000L +#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE_MASK 0x08000000L +#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE_MASK 0x10000000L +#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE_MASK 0x20000000L +#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE_MASK 0x40000000L +//DIO_CLK_CNTL +#define DIO_CLK_CNTL__DISPCLK_R_DIO_GATE_DIS__SHIFT 0x5 +#define DIO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS__SHIFT 0x8 +#define DIO_CLK_CNTL__REFCLK_R_DIO_GATE_DIS__SHIFT 0xa +#define DIO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS__SHIFT 0x18 +#define DIO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS__SHIFT 0x19 +#define DIO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS__SHIFT 0x1a +#define DIO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS__SHIFT 0x1b +#define DIO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS__SHIFT 0x1c +#define DIO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS__SHIFT 0x1d +#define DIO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS__SHIFT 0x1e +#define DIO_CLK_CNTL__DISPCLK_R_DIO_GATE_DIS_MASK 0x00000020L +#define DIO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS_MASK 0x00000100L +#define DIO_CLK_CNTL__REFCLK_R_DIO_GATE_DIS_MASK 0x00000400L +#define DIO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS_MASK 0x01000000L +#define DIO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS_MASK 0x02000000L +#define DIO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS_MASK 0x04000000L +#define DIO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS_MASK 0x08000000L +#define DIO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS_MASK 0x10000000L +#define DIO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS_MASK 0x20000000L +#define DIO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS_MASK 0x40000000L +//DIO_MEM_PWR_CTRL3 +#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_DIS__SHIFT 0x0 +#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_FORCE__SHIFT 0x1 +#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_DIS__SHIFT 0x3 +#define 
DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_FORCE__SHIFT 0x4 +#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_DIS__SHIFT 0x6 +#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_FORCE__SHIFT 0x7 +#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_DIS__SHIFT 0x9 +#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_FORCE__SHIFT 0xa +#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_DIS__SHIFT 0xc +#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_FORCE__SHIFT 0xd +#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_DIS__SHIFT 0xf +#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_FORCE__SHIFT 0x10 +#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_DIS_MASK 0x00000001L +#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_FORCE_MASK 0x00000006L +#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_DIS_MASK 0x00000008L +#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_FORCE_MASK 0x00000030L +#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_DIS_MASK 0x00000040L +#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_FORCE_MASK 0x00000180L +#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_DIS_MASK 0x00000200L +#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_FORCE_MASK 0x00000C00L +#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_DIS_MASK 0x00001000L +#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_FORCE_MASK 0x00006000L +#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_DIS_MASK 0x00008000L +#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_FORCE_MASK 0x00030000L +//DIO_POWER_MANAGEMENT_CNTL +#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0 +#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8 +#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L +#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L + +//DIO_MEM_PWR_STATUS1 +#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE__SHIFT 0x0 +#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE__SHIFT 0x2 +#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE__SHIFT 0x4 +#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE__SHIFT 0x6 +#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE__SHIFT 0x8 +#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE__SHIFT 0xa +#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE__SHIFT 0x10 +#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE__SHIFT 0x12 +#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE__SHIFT 0x14 +#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE__SHIFT 0x16 +#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE__SHIFT 0x18 +#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE__SHIFT 0x1a +#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE_MASK 0x00000001L +#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE_MASK 0x00000004L +#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE_MASK 0x00000010L +#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE_MASK 0x00000040L +#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE_MASK 0x00000100L +#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE_MASK 0x00000400L +#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE_MASK 0x00030000L +#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE_MASK 0x000C0000L +#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE_MASK 0x00300000L +#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE_MASK 0x00C00000L +#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE_MASK 0x03000000L +#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE_MASK 0x0C000000L +//DIO_CLK_CNTL2 +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS__SHIFT 0x7 +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS__SHIFT 0x8 +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS__SHIFT 0x9 +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS__SHIFT 0xa +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS__SHIFT 0xb +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS__SHIFT 0xc +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS__SHIFT 0xd +#define 
DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS__SHIFT 0x11 +#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS__SHIFT 0x12 +#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS__SHIFT 0x13 +#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS__SHIFT 0x14 +#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS__SHIFT 0x15 +#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS__SHIFT 0x16 +#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS__SHIFT 0x17 +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS_MASK 0x00000080L +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS_MASK 0x00000100L +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS_MASK 0x00000200L +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS_MASK 0x00000400L +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS_MASK 0x00000800L +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS_MASK 0x00001000L +#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS_MASK 0x00002000L +#define DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS_MASK 0x00020000L +#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS_MASK 0x00040000L +#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS_MASK 0x00080000L +#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS_MASK 0x00100000L +#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS_MASK 0x00200000L +#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS_MASK 0x00400000L +#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS_MASK 0x00800000L +//DIO_CLK_CNTL3 +#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS__SHIFT 0x0 +#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS__SHIFT 0x1 +#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS__SHIFT 0x2 +#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS__SHIFT 0x3 +#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS__SHIFT 0x4 +#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS__SHIFT 0x5 +#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS__SHIFT 0x6 +#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS__SHIFT 0xa +#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS__SHIFT 0xb +#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS__SHIFT 0xc +#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS__SHIFT 0xd +#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS__SHIFT 0xe +#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS__SHIFT 0xf +#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS__SHIFT 0x10 +#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS_MASK 0x00000001L +#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS_MASK 0x00000002L +#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS_MASK 0x00000004L +#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS_MASK 0x00000008L +#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS_MASK 0x00000010L +#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS_MASK 0x00000020L +#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS_MASK 0x00000040L +#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS_MASK 0x00000400L +#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS_MASK 0x00000800L +#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS_MASK 0x00001000L +#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS_MASK 0x00002000L +#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS_MASK 0x00004000L +#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS_MASK 0x00008000L +#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS_MASK 0x00010000L +//DIO_HDMI_RXSTATUS_TIMER_CONTROL +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0 +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4 +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8 +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc +#define 
DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10 +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x00000001L +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x00000010L +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x00000100L +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x00001000L +#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0x0FFF0000L +//DIO_GENERIC_INTERRUPT_MESSAGE +#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS__SHIFT 0x0 +#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE__SHIFT 0x1 +#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS_MASK 0x00000001L +#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL +//DIO_GENERIC_INTERRUPT_CLEAR +#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR__SHIFT 0x0 +#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR_MASK 0x00000001L + + +// addressBlock: dce_dc_dio_hpd0_dispdec +//HPD0_DC_HPD_INT_STATUS +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0 +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1 +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4 +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8 +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18 +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L +#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L +//HPD0_DC_HPD_INT_CONTROL +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0 +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8 +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10 +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14 +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18 +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L +#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L +//HPD0_DC_HPD_CONTROL +#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0 +#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10 +#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c +#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL +#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L +#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L +//HPD0_DC_HPD_FAST_TRAIN_CNTL +#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0 +#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc +#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18 +#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c +#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL +#define 
HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L +#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L +#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L +//HPD0_DC_HPD_TOGGLE_FILT_CNTL +#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0 +#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14 +#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL +#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L + + +// addressBlock: dce_dc_dio_hpd1_dispdec +//HPD1_DC_HPD_INT_STATUS +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0 +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1 +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4 +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8 +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18 +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L +#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L +//HPD1_DC_HPD_INT_CONTROL +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0 +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8 +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10 +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14 +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18 +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L +#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L +//HPD1_DC_HPD_CONTROL +#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0 +#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10 +#define HPD1_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c +#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL +#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L +#define HPD1_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L +//HPD1_DC_HPD_FAST_TRAIN_CNTL +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0 +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18 +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L +#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L +//HPD1_DC_HPD_TOGGLE_FILT_CNTL +#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0 +#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14 +#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL +#define 
HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L + +// addressBlock: dce_dc_dio_dp_aux0_dispdec +//DP_AUX0_AUX_CONTROL +#define DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT 0x0 +#define DP_AUX0_AUX_CONTROL__AUX_RESET__SHIFT 0x4 +#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5 +#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8 +#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc +#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10 +#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12 +#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14 +#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18 +#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c +#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d +#define DP_AUX0_AUX_CONTROL__SPARE_0__SHIFT 0x1e +#define DP_AUX0_AUX_CONTROL__SPARE_1__SHIFT 0x1f +#define DP_AUX0_AUX_CONTROL__AUX_EN_MASK 0x00000001L +#define DP_AUX0_AUX_CONTROL__AUX_RESET_MASK 0x00000010L +#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L +#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L +#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L +#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L +#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L +#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L +#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L +#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L +#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L +#define DP_AUX0_AUX_CONTROL__SPARE_0_MASK 0x40000000L +#define DP_AUX0_AUX_CONTROL__SPARE_1_MASK 0x80000000L +//DP_AUX0_AUX_SW_CONTROL +#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0 +#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2 +#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4 +#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10 +#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L +#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L +#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L +#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L +//DP_AUX0_AUX_ARB_CONTROL +#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa +#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19 +#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L +#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL +#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L +#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L +#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L +#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L +#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L +#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L +#define 
DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L +#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L +//DP_AUX0_AUX_INTERRUPT_CONTROL +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0 +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1 +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2 +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4 +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5 +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6 +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L +#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L +//DP_AUX0_AUX_SW_STATUS +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17 +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18 +#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L +#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L +#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L +//DP_AUX0_AUX_LS_STATUS +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 
0x0 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18 +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L +#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L +//DP_AUX0_AUX_SW_DATA +#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0 +#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8 +#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10 +#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f +#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L +#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L +#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L +#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L +//DP_AUX0_AUX_LS_DATA +#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8 +#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10 +#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L +#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L +//DP_AUX0_AUX_DPHY_TX_REF_CONTROL +#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0 +#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4 
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10 +#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L +#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L +#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L +//DP_AUX0_AUX_DPHY_TX_CONTROL +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0 +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4 +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6 +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8 +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10 +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L +#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L +//DP_AUX0_AUX_DPHY_RX_CONTROL0 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14 +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L +#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L +//DP_AUX0_AUX_DPHY_RX_CONTROL1 +#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0 +#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8 +#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf +#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL +#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L +#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L +//DP_AUX0_AUX_DPHY_TX_STATUS +#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0 +#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4 +#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10 +#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L +#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L +#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L +//DP_AUX0_AUX_DPHY_RX_STATUS +#define 
DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0 +#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8 +#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10 +#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15 +#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L +#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L +#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L +#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L + +// addressBlock: dce_dc_dio_dp_aux1_dispdec +//DP_AUX1_AUX_CONTROL +#define DP_AUX1_AUX_CONTROL__AUX_EN__SHIFT 0x0 +#define DP_AUX1_AUX_CONTROL__AUX_RESET__SHIFT 0x4 +#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5 +#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8 +#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc +#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10 +#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12 +#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14 +#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18 +#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c +#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d +#define DP_AUX1_AUX_CONTROL__SPARE_0__SHIFT 0x1e +#define DP_AUX1_AUX_CONTROL__SPARE_1__SHIFT 0x1f +#define DP_AUX1_AUX_CONTROL__AUX_EN_MASK 0x00000001L +#define DP_AUX1_AUX_CONTROL__AUX_RESET_MASK 0x00000010L +#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L +#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L +#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L +#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L +#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L +#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L +#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L +#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L +#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L +#define DP_AUX1_AUX_CONTROL__SPARE_0_MASK 0x40000000L +#define DP_AUX1_AUX_CONTROL__SPARE_1_MASK 0x80000000L +//DP_AUX1_AUX_SW_CONTROL +#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0 +#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2 +#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4 +#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10 +#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L +#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L +#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L +#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L +//DP_AUX1_AUX_ARB_CONTROL +#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa +#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19 +#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 
0x0000000CL +#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L +#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L +//DP_AUX1_AUX_INTERRUPT_CONTROL +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0 +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1 +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2 +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4 +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5 +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6 +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L +#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L +//DP_AUX1_AUX_SW_STATUS +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17 +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18 +#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L +#define 
DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L +#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L +#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L +//DP_AUX1_AUX_LS_STATUS +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18 +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L +#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L +//DP_AUX1_AUX_SW_DATA +#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0 +#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8 +#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10 +#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f +#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L +#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L +#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK 
0x001F0000L +#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L +//DP_AUX1_AUX_LS_DATA +#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8 +#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10 +#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L +#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L +//DP_AUX1_AUX_DPHY_TX_REF_CONTROL +#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0 +#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4 +#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10 +#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L +#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L +#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L +//DP_AUX1_AUX_DPHY_TX_CONTROL +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0 +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4 +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6 +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8 +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10 +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L +#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L +//DP_AUX1_AUX_DPHY_RX_CONTROL0 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14 +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L +#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L +//DP_AUX1_AUX_DPHY_RX_CONTROL1 +#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0 +#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8 +#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf +#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL +#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L +#define 
DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L +//DP_AUX1_AUX_DPHY_TX_STATUS +#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0 +#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4 +#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10 +#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L +#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L +#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L +//DP_AUX1_AUX_DPHY_RX_STATUS +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0 +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8 +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10 +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15 +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L +#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L + + +// addressBlock: dce_dc_dio_dig0_dispdec +//DIG0_DIG_FE_CNTL +#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0 +#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4 +#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8 +#define DIG0_DIG_FE_CNTL__DIG_START__SHIFT 0xa +#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc +#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10 +#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12 +#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13 +#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18 +#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c +#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e +#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L +#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L +#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L +#define DIG0_DIG_FE_CNTL__DIG_START_MASK 0x00000400L +#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L +#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L +#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L +#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L +#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L +#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L +#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L +//DIG0_DIG_OUTPUT_CRC_CNTL +#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0 +#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4 +#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8 +#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L +#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L +#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L +//DIG0_DIG_OUTPUT_CRC_RESULT +#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0 +#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL +//DIG0_DIG_CLOCK_PATTERN +#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0 +#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL +//DIG0_DIG_TEST_PATTERN +#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0 +#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1 +#define 
DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4 +#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5 +#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6 +#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10 +#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L +#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L +#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L +#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L +#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L +#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L +//DIG0_DIG_RANDOM_PATTERN_SEED +#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0 +#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18 +#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL +#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L +//DIG0_DIG_FIFO_STATUS +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0 +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1 +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2 +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8 +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10 +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16 +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L +#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L +//DIG0_HDMI_METADATA_PACKET_CONTROL +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0 +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4 +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8 +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10 +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L +#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L +//DIG0_HDMI_GENERIC_PACKET_CONTROL4 +#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0 +#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 
0x10 +#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL +#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L +//DIG0_HDMI_CONTROL +#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0 +#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1 +#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2 +#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3 +#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4 +#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8 +#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9 +#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18 +#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c +#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L +#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L +#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L +#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L +#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L +#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L +#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L +#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L +#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L +//DIG0_HDMI_STATUS +#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0 +#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10 +#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14 +#define DIG0_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b +#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L +#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L +#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L +#define DIG0_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L +//DIG0_HDMI_AUDIO_PACKET_CONTROL +#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4 +#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8 +#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10 +#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L +#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L +#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L +//DIG0_HDMI_ACR_PACKET_CONTROL +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0 +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1 +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4 +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8 +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10 +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L +#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L +//DIG0_HDMI_VBI_PACKET_CONTROL +#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0 +#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4 
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+
+//DIG0_HDMI_INFOFRAME_CONTROL0
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG0_HDMI_INFOFRAME_CONTROL1
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+
+//DIG0_HDMI_GC
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
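/*
 * Editorial note, not part of the diff: every field in these generated
 * headers comes as a <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK pair.
 * The short sketch below shows the conventional way such a pair is
 * consumed; it is an illustration only. amdgpu itself wraps the same
 * pattern in its REG_GET_FIELD()/REG_SET_FIELD() macros rather than
 * open-coding it.
 */
#include <stdint.h>

/* Extract a field value: mask out the neighbouring bits, shift down. */
static inline uint32_t get_field(uint32_t reg_val, uint32_t mask,
				 uint32_t shift)
{
	return (reg_val & mask) >> shift;
}

/* Modify one field without disturbing the rest of the register word. */
static inline uint32_t set_field(uint32_t reg_val, uint32_t mask,
				 uint32_t shift, uint32_t field_val)
{
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/*
 * Example with fields defined above (0x1 is only a placeholder value):
 *   v = set_field(v, DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK,
 *                 DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT, 0x1);
 */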
+//DIG0_AFMT_ISRC1_0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG0_AFMT_ISRC1_1
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_2
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_3
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_4
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_1
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_2
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_3
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_DB_CONTROL
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG0_DME_CONTROL
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
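/*
 * Editorial note, not part of the diff: the PENDING/TAKEN/TAKEN_CLR
 * trio in DIG0_HDMI_DB_CONTROL above reads like a double-buffered
 * update handshake (software arms an update, hardware latches it at a
 * safe point and flags it taken, software acknowledges). That reading
 * is an assumption from the field names only; the sketch below is a
 * hypothetical illustration, and mmio_read()/mmio_write() plus the
 * offset argument are invented helpers, not amdgpu API.
 */
#include <stdbool.h>
#include <stdint.h>

extern uint32_t mmio_read(uint32_t offset);            /* hypothetical */
extern void mmio_write(uint32_t offset, uint32_t val); /* hypothetical */

static bool hdmi_db_update_taken(uint32_t db_control)
{
	uint32_t v = mmio_read(db_control);

	if (!(v & DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK))
		return false;	/* hardware has not latched the update yet */

	/* Assumed write-1-to-clear acknowledge of the TAKEN flag. */
	mmio_write(db_control, v | DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK);
	return true;
}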
+//DIG0_AFMT_MPEG_INFO0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG0_AFMT_MPEG_INFO1
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG0_AFMT_GENERIC_HDR
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_1
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_2
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_3
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_4
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_5
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_6
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_7
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_ACR_32_0
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_32_1
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_44_0
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_44_1
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_48_0
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_48_1
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_STATUS_0
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_STATUS_1
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG0_AFMT_AUDIO_INFO0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG0_AFMT_AUDIO_INFO1
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG0_AFMT_60958_0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG0_AFMT_60958_1
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_CONTROL
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG0_AFMT_RAMP_CONTROL0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG0_AFMT_RAMP_CONTROL1
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG0_AFMT_RAMP_CONTROL2
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_RAMP_CONTROL3
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_60958_2
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_RESULT
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG0_AFMT_STATUS
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG0_AFMT_VBI_PACKET_CONTROL
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG0_AFMT_INFOFRAME_CONTROL0
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG0_AFMT_AUDIO_SRC_CONTROL
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG0_DIG_BE_CNTL
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG0_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG0_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG0_DIG_BE_EN_CNTL
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
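/*
 * Editorial note, not part of the diff: the HDMI_ACR_32/44/48_0/_1
 * register pairs a few blocks above hold the CTS and N values for HDMI
 * Audio Clock Regeneration. The HDMI specification ties them together
 * as 128 * f_audio = f_TMDS * N / CTS, so for a fixed N the source
 * programs CTS = f_TMDS * N / (128 * f_audio). A minimal sketch:
 */
#include <stdint.h>

static inline uint32_t hdmi_acr_cts(uint64_t tmds_clk_hz, uint32_t n,
				    uint32_t audio_rate_hz)
{
	return (uint32_t)(tmds_clk_hz * n / (128ull * audio_rate_hz));
}

/*
 * Example: with the spec's recommended N = 6144 for 48 kHz audio and a
 * 148.5 MHz TMDS clock, hdmi_acr_cts(148500000, 6144, 48000) == 148500.
 */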
+//DIG0_TMDS_CNTL
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG0_TMDS_CONTROL_CHAR
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG0_TMDS_CONTROL0_FEEDBACK
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG0_TMDS_STEREOSYNC_CTL_SEL
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+
+//DIG0_TMDS_CTL_BITS
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG0_TMDS_DCBALANCER_CONTROL
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG0_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG0_TMDS_CTL0_1_GEN_CNTL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG0_TMDS_CTL2_3_GEN_CNTL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+
+//DIG0_DIG_VERSION
+#define DIG0_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG0_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG0_DIG_LANE_ENABLE
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+
+//DIG0_AFMT_CNTL
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG0_AFMT_VBI_PACKET_CONTROL1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+//DP0_DP_LINK_CNTL
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP0_DP_PIXEL_FORMAT
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP0_DP_MSA_COLORIMETRY
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP0_DP_CONFIG
+#define DP0_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP0_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP0_DP_VID_STREAM_CNTL
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP0_DP_STEER_FIFO
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP0_DP_MSA_MISC
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP0_DP_VID_TIMING
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP0_DP_VID_N
+#define DP0_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP0_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP0_DP_VID_M
+#define DP0_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP0_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP0_DP_LINK_FRAMING_CNTL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP0_DP_HBR2_EYE_PATTERN
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP0_DP_VID_MSA_VBID
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP0_DP_VID_INTERRUPT_CNTL
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP0_DP_DPHY_CNTL
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP0_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP0_DP_DPHY_SYM0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM1
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM2
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP0_DP_DPHY_8B10B_CNTL
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP0_DP_DPHY_PRBS_CNTL
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP0_DP_DPHY_SCRAM_CNTL
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP0_DP_DPHY_CRC_EN
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP0_DP_DPHY_CRC_CNTL
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP0_DP_DPHY_CRC_RESULT
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP0_DP_DPHY_CRC_MST_CNTL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP0_DP_DPHY_CRC_MST_STATUS
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP0_DP_DPHY_FAST_TRAINING
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP0_DP_DPHY_FAST_TRAINING_STATUS
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP0_DP_SEC_CNTL
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP0_DP_SEC_CNTL1
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING1
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING2
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING3
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING4
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP0_DP_SEC_AUD_N
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_N_READBACK
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0 +#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL +//DP0_DP_SEC_AUD_M +#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0 +#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL +//DP0_DP_SEC_AUD_M_READBACK +#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0 +#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL +//DP0_DP_SEC_TIMESTAMP +#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0 +#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L +//DP0_DP_SEC_PACKET_CNTL +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1 +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4 +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8 +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10 +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L +#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L +//DP0_DP_MSE_RATE_CNTL +#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0 +#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a +#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL +#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L +//DP0_DP_CP_MSE_STATUS +//DP0_DP_MSE_RATE_UPDATE +#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0 +#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L +//DP0_DP_MSE_SAT0 +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0 +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8 +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10 +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18 +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L +#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L +//DP0_DP_MSE_SAT1 +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0 +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8 +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10 +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18 +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L +#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L +//DP0_DP_MSE_SAT2 +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0 +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8 +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10 +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18 +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L +#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L +//DP0_DP_MSE_SAT_UPDATE +#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0 +#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8 +#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L +#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L +//DP0_DP_MSE_LINK_TIMING +#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0 +#define 
DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10 +#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL +#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L +//DP0_DP_MSE_MISC_CNTL +#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0 +#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4 +#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8 +#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L +#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L +#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L + +#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0 +#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf +#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10 +#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL +#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L +#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L +//DP0_DP_DPHY_HBR2_PATTERN_CONTROL +#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0 +#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L +//DP0_DP_MSE_SAT0_STATUS +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0 +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8 +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10 +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18 +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L +#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L +//DP0_DP_MSE_SAT1_STATUS +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0 +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8 +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10 +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18 +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L +#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L +//DP0_DP_MSE_SAT2_STATUS +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0 +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8 +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10 +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18 +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L +#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L +//DP0_DP_MSA_TIMING_PARAM1 +#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0 +#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10 +#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL +#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L +//DP0_DP_MSA_TIMING_PARAM2 +#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0 +#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10 +#define 
DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL +#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L +//DP0_DP_MSA_TIMING_PARAM3 +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0 +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10 +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L +#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L +//DP0_DP_MSA_TIMING_PARAM4 +#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0 +#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10 +#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL +#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L +//DP0_DP_DSC_CNTL +#define DP0_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0 +#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10 +#define DP0_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L +#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L +//DP0_DP_SEC_CNTL2 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19 +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L +#define 
DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L +#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L +//DP0_DP_SEC_CNTL3 +#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0 +#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10 +#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL +#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L +//DP0_DP_SEC_CNTL4 +#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0 +#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10 +#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL +#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L +//DP0_DP_SEC_CNTL5 +#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0 +#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10 +#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL +#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L +//DP0_DP_SEC_CNTL6 +#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0 +#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL +//DP0_DP_SEC_CNTL7 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19 +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c +#define 
DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L +#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L +//DP0_DP_DB_CNTL +#define DP0_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0 +#define DP0_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4 +#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5 +#define DP0_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8 +#define DP0_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc +#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf +#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10 +#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11 +#define DP0_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L +#define DP0_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L +#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L +#define DP0_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L +#define DP0_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L +#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L +#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L +#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L +//DP0_DP_MSA_VBID_MISC +#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0 +#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4 +#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8 +#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9 +#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc +#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd +#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf +#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10 +#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L +#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L +#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L +#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L +#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L +#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L +#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L +#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L +//DP0_DP_SEC_METADATA_TRANSMISSION +#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0 +#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1 +#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4 +#define 
DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10 +#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L +#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L +#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L +#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L +//DP0_DP_DSC_BYTES_PER_PIXEL +#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0 +#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL + + +// addressBlock: dce_dc_dio_dig1_dispdec +//DIG1_DIG_FE_CNTL +#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0 +#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4 +#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8 +#define DIG1_DIG_FE_CNTL__DIG_START__SHIFT 0xa +#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc +#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10 +#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12 +#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13 +#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18 +#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c +#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e +#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L +#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L +#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L +#define DIG1_DIG_FE_CNTL__DIG_START_MASK 0x00000400L +#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L +#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L +#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L +#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L +#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L +#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L +#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L +//DIG1_DIG_OUTPUT_CRC_CNTL +#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0 +#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4 +#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8 +#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L +#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L +#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L +//DIG1_DIG_OUTPUT_CRC_RESULT +#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0 +#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL +//DIG1_DIG_CLOCK_PATTERN +#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0 +#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL +//DIG1_DIG_TEST_PATTERN +#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0 +#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1 +#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4 +#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5 +#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6 +#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10 +#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L +#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L +#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L +#define 
DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L +#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L +#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L +//DIG1_DIG_RANDOM_PATTERN_SEED +#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0 +#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18 +#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL +#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L +//DIG1_DIG_FIFO_STATUS +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0 +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1 +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2 +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8 +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10 +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16 +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L +#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L +//DIG1_HDMI_METADATA_PACKET_CONTROL +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0 +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4 +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8 +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10 +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L +#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L +//DIG1_HDMI_GENERIC_PACKET_CONTROL4 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL +#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L +//DIG1_HDMI_CONTROL +#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0 +#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1 +#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2 +#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3 +#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4 
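DIG1_HDMI_CONTROL packs the HDMI link controls into a single register; above a 340 Mcsc TMDS character rate, HDMI 2.0 runs the link scrambled with the 1/40 clock ratio, which corresponds to the HDMI_DATA_SCRAMBLE_EN and HDMI_CLOCK_CHANNEL_RATE bits here. A sketch of the read-modify-write, assuming hypothetical rreg32()/wreg32() MMIO accessors in place of the driver's real register helpers:

	#include <stdint.h>

	extern uint32_t rreg32(uint32_t offset);           /* hypothetical MMIO read  */
	extern void wreg32(uint32_t offset, uint32_t val); /* hypothetical MMIO write */

	/* sketch: enable scrambling and the 1/40 TMDS clock ratio for
	 * character rates above 340 Mcsc */
	static void hdmi_set_gt340mcsc(uint32_t hdmi_control_offset)
	{
		uint32_t v = rreg32(hdmi_control_offset);

		v |= DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK;
		v |= DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK;
		wreg32(hdmi_control_offset, v);
	}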
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8 +#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9 +#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18 +#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c +#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L +#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L +#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L +#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L +#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L +#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L +#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L +#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L +#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L +//DIG1_HDMI_STATUS +#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0 +#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10 +#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14 +#define DIG1_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b +#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L +#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L +#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L +#define DIG1_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L +//DIG1_HDMI_AUDIO_PACKET_CONTROL +#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4 +#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8 +#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10 +#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L +#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L +#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L +//DIG1_HDMI_ACR_PACKET_CONTROL +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0 +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1 +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4 +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8 +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10 +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L +#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L +//DIG1_HDMI_VBI_PACKET_CONTROL +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0 +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4 +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5 +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8 +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9 + +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10 + +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L +#define 
DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L +#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L + +//DIG1_HDMI_INFOFRAME_CONTROL0 +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4 +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5 +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8 +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9 +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L +#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L +//DIG1_HDMI_INFOFRAME_CONTROL1 +#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8 +#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10 +#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L +#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L +//DIG1_HDMI_GENERIC_PACKET_CONTROL0 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 
0x00000040L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L +#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L + +//DIG1_HDMI_GC +#define DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0 +#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2 +#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4 +#define DIG1_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8 +#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc +#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L +#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L +#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L +#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L +#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L +//DIG1_AFMT_AUDIO_PACKET_CONTROL2 +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0 +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1 +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8 +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10 +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18 +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L +#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L +//DIG1_AFMT_ISRC1_0 +#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0 +#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6 +#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7 +#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L +#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L +#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L +//DIG1_AFMT_ISRC1_1 +#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0 +#define 
DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8 +#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10 +#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18 +#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL +#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L +//DIG1_AFMT_ISRC1_2 +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0 +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8 +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10 +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18 +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L +//DIG1_AFMT_ISRC1_3 +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0 +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8 +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10 +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18 +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L +//DIG1_AFMT_ISRC1_4 +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0 +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8 +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10 +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18 +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L +//DIG1_AFMT_ISRC2_0 +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0 +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8 +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10 +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18 +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L +//DIG1_AFMT_ISRC2_1 +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0 +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8 +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10 +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18 +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L +//DIG1_AFMT_ISRC2_2 +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0 +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8 +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10 +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18 +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L +//DIG1_AFMT_ISRC2_3 +#define 
DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0 +#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8 +#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10 +#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18 +#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL +#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L +#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L +#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L +//DIG1_HDMI_GENERIC_PACKET_CONTROL2 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL +#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L +//DIG1_HDMI_GENERIC_PACKET_CONTROL3 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL +#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L +//DIG1_HDMI_DB_CONTROL +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0 +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4 +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5 +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8 +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc +#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf +#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10 +#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11 +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L +#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L +#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L +#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L +#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L +//DIG1_DME_CONTROL +#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0 +#define DIG1_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4 +#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8 +#define DIG1_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc +#define DIG1_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd +#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10 +#define DIG1_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14 +#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L +#define DIG1_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L +#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L +#define DIG1_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L +#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L +#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L +#define DIG1_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L +//DIG1_AFMT_MPEG_INFO0 +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0 +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8 +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10 +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18 +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 
0x00FF0000L +#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L +//DIG1_AFMT_MPEG_INFO1 +#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0 +#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8 +#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc +#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL +#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L +#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L +//DIG1_AFMT_GENERIC_HDR +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_0 +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_1 +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_2 +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_3 +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_4 +#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L +#define 
DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_5 +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_6 +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L +//DIG1_AFMT_GENERIC_7 +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0 +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8 +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10 +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18 +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L +#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L +//DIG1_HDMI_GENERIC_PACKET_CONTROL1 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10 +#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL +#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L +//DIG1_HDMI_ACR_32_0 +#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc +#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L +//DIG1_HDMI_ACR_32_1 +#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0 +#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL +//DIG1_HDMI_ACR_44_0 +#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc +#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L +//DIG1_HDMI_ACR_44_1 +#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0 +#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL +//DIG1_HDMI_ACR_48_0 +#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc +#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L +//DIG1_HDMI_ACR_48_1 +#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0 +#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL +//DIG1_HDMI_ACR_STATUS_0 +#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc +#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L +//DIG1_HDMI_ACR_STATUS_1 +#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0 +#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL +//DIG1_AFMT_AUDIO_INFO0 +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0 +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8 +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10 +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18 
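The DIG1_HDMI_ACR_32/44/48 register pairs above hold the audio clock regeneration parameters for 32, 44.1 and 48 kHz sample rates: the sink recovers the audio clock from 128 * fs = f_TMDS * N / CTS, and HDMI_ACR_AUTO_SEND in DIG1_HDMI_ACR_PACKET_CONTROL lets the hardware measure CTS itself. A worked sketch of the software-computed case, using the HDMI-recommended N of 6144 for 48 kHz; illustration only, not part of the patch:

	#include <stdint.h>

	/* sketch only: CTS follows from 128 * fs = f_TMDS * N / CTS */
	static inline uint32_t hdmi_acr_cts(uint64_t tmds_clock_hz, uint32_t n,
					    uint32_t fs_hz)
	{
		return (uint32_t)(tmds_clock_hz * n / (128ull * (uint64_t)fs_hz));
	}

	/* 48 kHz audio on a 297 MHz TMDS clock with the recommended N = 6144:
	 * 297000000 * 6144 / (128 * 48000) = 297000, which fits the 20-bit
	 * HDMI_ACR_CTS_48 field (mask 0xFFFFF000, shift 0xc) */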
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L +#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L +//DIG1_AFMT_AUDIO_INFO1 +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0 +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10 +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L +#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L +//DIG1_AFMT_60958_0 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18 +#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c +#define DIG1_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L +#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L +//DIG1_AFMT_60958_1 +#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0 +#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4 +#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10 +#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12 +#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14 +#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL +#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L +#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L +#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L +#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L +//DIG1_AFMT_AUDIO_CRC_CONTROL +#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0 +#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4 +#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8 +#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc +#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10 +#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L +#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L +#define 
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG1_AFMT_RAMP_CONTROL0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG1_AFMT_RAMP_CONTROL1
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG1_AFMT_RAMP_CONTROL2
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_RAMP_CONTROL3
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_60958_2
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG1_AFMT_AUDIO_CRC_RESULT
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG1_AFMT_STATUS
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG1_AFMT_AUDIO_PACKET_CONTROL
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG1_AFMT_VBI_PACKET_CONTROL
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG1_AFMT_INFOFRAME_CONTROL0
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG1_AFMT_AUDIO_SRC_CONTROL
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG1_DIG_BE_CNTL
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG1_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG1_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG1_DIG_BE_EN_CNTL
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG1_TMDS_CNTL
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG1_TMDS_CONTROL_CHAR
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG1_TMDS_CONTROL0_FEEDBACK
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG1_TMDS_STEREOSYNC_CTL_SEL
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+
+//DIG1_TMDS_CTL_BITS
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG1_TMDS_DCBALANCER_CONTROL
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG1_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG1_TMDS_CTL0_1_GEN_CNTL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG1_TMDS_CTL2_3_GEN_CNTL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+
+//DIG1_DIG_VERSION
+#define DIG1_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG1_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG1_DIG_LANE_ENABLE
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+
+//DIG1_AFMT_CNTL
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG1_AFMT_VBI_PACKET_CONTROL1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+//DP1_DP_LINK_CNTL
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP1_DP_PIXEL_FORMAT
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP1_DP_MSA_COLORIMETRY
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP1_DP_CONFIG
+#define DP1_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP1_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP1_DP_VID_STREAM_CNTL
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP1_DP_STEER_FIFO
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP1_DP_MSA_MISC
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP1_DP_VID_TIMING
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP1_DP_VID_N
+#define DP1_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP1_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP1_DP_VID_M
+#define DP1_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP1_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP1_DP_LINK_FRAMING_CNTL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP1_DP_HBR2_EYE_PATTERN
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP1_DP_VID_MSA_VBID
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP1_DP_VID_INTERRUPT_CNTL
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP1_DP_DPHY_CNTL
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP1_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP1_DP_DPHY_SYM0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM1
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM2
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP1_DP_DPHY_8B10B_CNTL
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP1_DP_DPHY_PRBS_CNTL
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP1_DP_DPHY_SCRAM_CNTL
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP1_DP_DPHY_CRC_EN
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP1_DP_DPHY_CRC_CNTL
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP1_DP_DPHY_CRC_RESULT
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP1_DP_DPHY_CRC_MST_CNTL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP1_DP_DPHY_CRC_MST_STATUS
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP1_DP_DPHY_FAST_TRAINING
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP1_DP_DPHY_FAST_TRAINING_STATUS
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP1_DP_SEC_CNTL
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP1_DP_SEC_CNTL1
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING1
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING2
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING3
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING4
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP1_DP_SEC_AUD_N
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_N_READBACK
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M_READBACK
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_TIMESTAMP
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP1_DP_SEC_PACKET_CNTL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP1_DP_MSE_RATE_CNTL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP1_DP_CP_MSE_STATUS
+//DP1_DP_MSE_RATE_UPDATE
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP1_DP_MSE_SAT0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP1_DP_MSE_SAT_UPDATE
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP1_DP_MSE_LINK_TIMING
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP1_DP_MSE_MISC_CNTL
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+
+//DP1_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP1_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP1_DP_MSE_SAT0_STATUS
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1_STATUS
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2_STATUS
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP1_DP_MSA_TIMING_PARAM1
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM2
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM3
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP1_DP_MSA_TIMING_PARAM4
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP1_DP_DSC_CNTL
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP1_DP_SEC_CNTL2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP1_DP_SEC_CNTL3
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL4
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL5
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL6
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP1_DP_SEC_CNTL7
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP1_DP_DB_CNTL
+#define DP1_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP1_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP1_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP1_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP1_DP_MSA_VBID_MISC
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_METADATA_TRANSMISSION
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
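(Editorial note, not part of the diff: every field FIELD of a register REG in these generated headers gets a REG__FIELD__SHIFT / REG__FIELD_MASK pair. A minimal sketch of the field-access idiom this naming enables is below; the helper names are illustrative only — amdgpu's own REG_GET_FIELD/REG_SET_FIELD macros are built on the same convention.)

/* Illustrative helpers only -- not part of the generated header. */
#define FIELD_SHIFT(reg, field)  reg##__##field##__SHIFT
#define FIELD_MASK(reg, field)   reg##__##field##_MASK

/* Extract a field from a 32-bit register value. */
#define GET_FIELD(val, reg, field) \
	(((val) & FIELD_MASK(reg, field)) >> FIELD_SHIFT(reg, field))

/* Replace a field inside a 32-bit register value. */
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~FIELD_MASK(reg, field)) | \
	 (((fv) << FIELD_SHIFT(reg, field)) & FIELD_MASK(reg, field)))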
+//DP1_DP_DSC_BYTES_PER_PIXEL +#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0 +#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL + + +// addressBlock: dce_dc_dcio_dcio_dispdec +//DC_GENERICA +#define DC_GENERICA__GENERICA_EN__SHIFT 0x0 +#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7 +#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc +#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10 +#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14 +#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18 +#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L +#define DC_GENERICA__GENERICA_SEL_MASK 0x00000F80L +#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0x0000F000L +#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0x000F0000L +#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00F00000L +#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x0F000000L +//UNIPHYA_LINK_CNTL +#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0 +#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4 +#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8 +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf +#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14 +#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18 +#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L +#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L +#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L +#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L +#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L +#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L +//UNIPHYA_CHANNEL_XBAR_CNTL +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0 +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8 +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10 +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18 +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L +#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L +//UNIPHYB_LINK_CNTL +#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0 +#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4 +#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8 +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf +#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14 +#define 
UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18 +#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L +#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L +#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L +#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L +#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L +#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L +//UNIPHYB_CHANNEL_XBAR_CNTL +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0 +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8 +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10 +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18 +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L +#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L +//DC_PINSTRAPS +#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd +#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe +#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10 +#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY__SHIFT 0x11 +#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L +#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L +#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L +#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY_MASK 0x000E0000L +//DCIO_CLOCK_CNTL +#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5 +#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x00000020L + +// addressBlock: dce_dc_dcio_dcio_chip_dispdec +//DC_GPIO_DDC1_MASK +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0 +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4 +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6 +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8 +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe +#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10 +#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14 +#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16 +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18 +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L +#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L +#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L +#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0F000000L +#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xF0000000L 
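
[Editor's note] Each DDC pad in this dcio block is described by a quartet of registers: the _MASK register above (pad configuration: masking, pull-down enables, AUX/I2C pad mode, polarity, drive strength) plus the A/EN/Y triple that follows below, which reads as output-assert value, output enable, and input level respectively. Under that reading, open-drain bit-banging drives a line low by enabling the output with A held at 0, releases it by clearing EN, and samples the wire through Y. A minimal sketch under that assumption; read_reg()/write_reg() and the mm* offsets are stand-ins, not the driver's actual accessors (the real offsets live in a companion _offset header):

    #include <stdint.h>

    /* Hypothetical MMIO accessors and register offsets; only the field
     * macros (from the DC_GPIO_DDC1_EN / DC_GPIO_DDC1_Y blocks below)
     * come from this header. */
    extern uint32_t read_reg(uint32_t offset);
    extern void write_reg(uint32_t offset, uint32_t value);
    extern const uint32_t mmDC_GPIO_DDC1_EN, mmDC_GPIO_DDC1_Y;

    /* Release the DDC1 clock line: stop driving it and let the external
     * pull-up take it high (open-drain convention). */
    static void ddc1_scl_release(void)
    {
        uint32_t en = read_reg(mmDC_GPIO_DDC1_EN);

        en &= ~DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK;
        write_reg(mmDC_GPIO_DDC1_EN, en);
    }

    /* Sample the actual wire level through the Y (input) register. */
    static int ddc1_scl_sample(void)
    {
        uint32_t y = read_reg(mmDC_GPIO_DDC1_Y);

        return !!(y & DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK);
    }
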
+//DC_GPIO_DDC1_A +#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0 +#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8 +#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L +#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L +//DC_GPIO_DDC1_EN +#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0 +#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8 +#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L +#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L +//DC_GPIO_DDC1_Y +#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0 +#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8 +#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L +#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L +//DC_GPIO_DDC2_MASK +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0 +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4 +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6 +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8 +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe +#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10 +#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14 +#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16 +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18 +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L +#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L +#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L +#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0F000000L +#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xF0000000L +//DC_GPIO_DDC2_A +#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0 +#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8 +#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L +#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L +//DC_GPIO_DDC2_EN +#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0 +#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8 +#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L +#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L +//DC_GPIO_DDC2_Y +#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0 +#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8 +#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L +#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L +//DC_GPIO_HPD_MASK +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0 +#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK__SHIFT 0x1 +#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS__SHIFT 0x2 +#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL__SHIFT 0x3 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11 +#define 
DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19 +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L +#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK_MASK 0x00000002L +#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS_MASK 0x00000004L +#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL_MASK 0x00000008L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x000000C0L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000C00L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x000C0000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00C00000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x0C000000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L +#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0xC0000000L +//DC_GPIO_HPD_A +#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0 +#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8 +#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10 +#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18 +#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a +#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c +#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L +#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L +#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L +#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L +#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L +#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L + + +//DC_GPIO_HPD_EN +#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0 +#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1 +#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2 +#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI__SHIFT 0x3 +#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE__SHIFT 0x4 +#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5 +#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6 +#define DC_GPIO_HPD_EN__RX_HPD_SEL0__SHIFT 0x7 +#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8 +#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9 +#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L +#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x00000002L +#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x00000004L +#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI_MASK 0x00000008L +#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE_MASK 0x00000010L +#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x00000040L +#define DC_GPIO_HPD_EN__RX_HPD_SEL0_MASK 0x00000080L +#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L +#define 
DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x00000200L + +//DC_GPIO_HPD_Y +#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0 +#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8 +#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L +#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L +//DC_GPIO_PAD_STRENGTH_1 +#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0 +#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4 +#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN__SHIFT 0x8 +#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP__SHIFT 0xc +#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10 +#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14 +#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18 +#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c +#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000FL +#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000F0L +#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN_MASK 0x00000F00L +#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP_MASK 0x0000F000L +#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0x000F0000L +#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0x00F00000L +#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0F000000L +#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xF0000000L +//PHY_AUX_CNTL +#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN__SHIFT 0x0 +#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE__SHIFT 0x1 +#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL__SHIFT 0x2 +#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE__SHIFT 0x3 +#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN__SHIFT 0x4 +#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN__SHIFT 0x5 +#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN__SHIFT 0x6 +#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN__SHIFT 0x7 +#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN__SHIFT 0x8 +#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x9 +#define PHY_AUX_CNTL__AUX1_PAD_RXSEL__SHIFT 0xa +#define PHY_AUX_CNTL__AUX2_PAD_RXSEL__SHIFT 0xc +#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN__SHIFT 0x17 +#define PHY_AUX_CNTL__AUX_CAL_SPARE__SHIFT 0x18 +#define PHY_AUX_CNTL__AUX_CAL_BIASENTST__SHIFT 0x1c +#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN_MASK 0x00000001L +#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE_MASK 0x00000002L +#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL_MASK 0x00000004L +#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE_MASK 0x00000008L +#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN_MASK 0x00000010L +#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN_MASK 0x00000020L +#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN_MASK 0x00000040L +#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN_MASK 0x00000080L +#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN_MASK 0x00000100L +#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00000200L +#define PHY_AUX_CNTL__AUX1_PAD_RXSEL_MASK 0x00000C00L +#define PHY_AUX_CNTL__AUX2_PAD_RXSEL_MASK 0x00003000L +#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN_MASK 0x00800000L +#define PHY_AUX_CNTL__AUX_CAL_SPARE_MASK 0x03000000L +#define PHY_AUX_CNTL__AUX_CAL_BIASENTST_MASK 0x70000000L +//DC_GPIO_AUX_CTRL_1 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1__SHIFT 0x1 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9__SHIFT 0x2 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1__SHIFT 0x3 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9__SHIFT 0x4 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1__SHIFT 0x5 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9__SHIFT 0x6 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1__SHIFT 0x7 +#define 
DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN__SHIFT 0x8 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN__SHIFT 0x9 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN__SHIFT 0xa +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN__SHIFT 0xb +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL__SHIFT 0xc +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE__SHIFT 0xe +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN__SHIFT 0x12 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL__SHIFT 0x14 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL__SHIFT 0x19 +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL__SHIFT 0x1a +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL__SHIFT 0x1b +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL__SHIFT 0x1c +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL__SHIFT 0x1d +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL__SHIFT 0x1e +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9_MASK 0x00000001L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1_MASK 0x00000002L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9_MASK 0x00000004L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1_MASK 0x00000008L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9_MASK 0x00000010L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1_MASK 0x00000020L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9_MASK 0x00000040L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1_MASK 0x00000080L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN_MASK 0x00000100L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN_MASK 0x00000200L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN_MASK 0x00000400L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN_MASK 0x00000800L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL_MASK 0x00001000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE_MASK 0x0000C000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN_MASK 0x00040000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL_MASK 0x00300000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL_MASK 0x02000000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL_MASK 0x04000000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL_MASK 0x08000000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL_MASK 0x10000000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL_MASK 0x20000000L +#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL_MASK 0x40000000L +//DC_GPIO_AUX_CTRL_2 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9__SHIFT 0x10 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1__SHIFT 0x11 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9__SHIFT 0x12 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1__SHIFT 0x13 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN__SHIFT 0x14 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN__SHIFT 0x18 +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN__SHIFT 0x1b +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL__SHIFT 0x1c +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x00000003L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN_MASK 0x00000100L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x00001000L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9_MASK 0x00010000L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1_MASK 0x00020000L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9_MASK 0x00040000L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1_MASK 0x00080000L +#define 
DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN_MASK 0x00100000L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN_MASK 0x01000000L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN_MASK 0x08000000L +#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL_MASK 0x10000000L +//DC_GPIO_AUX_CTRL_3 +#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1 +#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8 +#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9 +#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10 +#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12 +#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L +#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L +#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L +#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L +#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L +#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L +//DC_GPIO_AUX_CTRL_4 +#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4 +#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL +#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L +//DC_GPIO_AUX_CTRL_5 +#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2 +#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc +#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd +#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12 +#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13 +#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18 +#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19 +#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L +#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL +#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L +#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L +#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L +#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L +#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L +#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L +//AUXI2C_PAD_ALL_PWR_OK +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L + + +// addressBlock: azf0endpoint0_endpointind +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L + +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL 
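
[Editor's note] The AUDIO_DESCRIPTOR0..13 registers above appear to carry the same three bytes as an EDID/CEA-861 Short Audio Descriptor: a 3-bit channel count (presumably the SAD's channels-minus-one encoding) in bits 0-2, the sample-rate bitmask byte in bits 8-15, and the format-specific third byte (e.g. LPCM bit depths) in bits 16-23, with DESCRIPTOR0 additionally holding a stereo-rates byte in bits 24-31. A sketch of packing one descriptor under that assumption; AZ_DESC0() and pack_sad() are local shorthands for illustration, not driver code:

    #include <stdint.h>

    /* Local shorthand for the long generated macro names of
     * AUDIO_DESCRIPTOR0 (the layout is identical for 1..13). */
    #define AZ_DESC0(field) \
        AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__##field

    /* Pack one CEA-861-style short audio descriptor: channel count,
     * sample-rate bitmask byte, and the format-specific third byte. */
    static uint32_t pack_sad(uint8_t channels, uint8_t rate_bits, uint8_t byte2)
    {
        uint32_t v = 0;

        v |= ((uint32_t)(channels - 1) << AZ_DESC0(MAX_CHANNELS__SHIFT)) &
             AZ_DESC0(MAX_CHANNELS_MASK);
        v |= ((uint32_t)rate_bits << AZ_DESC0(SUPPORTED_FREQUENCIES__SHIFT)) &
             AZ_DESC0(SUPPORTED_FREQUENCIES_MASK);
        v |= ((uint32_t)byte2 << AZ_DESC0(DESCRIPTOR_BYTE_2__SHIFT)) &
             AZ_DESC0(DESCRIPTOR_BYTE_2_MASK);
        return v;
    }

The SINK_INFO registers continuing below follow a similar byte-packing scheme: SINK_INFO1 holds a description length and SINK_INFO4 onward hold four ASCII description bytes per 32-bit register, 8 bits apart.
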
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0 +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2 +#define 
AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 
0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L +//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL +#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L +#define 
AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L +//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L +//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L +//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L +//AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L +//AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8 +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L +#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L + + +// addressBlock: azf0endpoint1_endpointind +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1 
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8 +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0 +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6 
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L + +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 
0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4 +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L +//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10 +#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18 +#define 
AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define
AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L +//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L +//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL +//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL +//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L +//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10 +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L +#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L + + +// addressBlock: azf0inputendpoint1_inputendpointind +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES +#define 
AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L +#define 
AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 
0x00000004L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L +#define 
AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L +#define 
AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 
0x000000F0L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L +#define 
AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14 +#define 
AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L +//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0 +#define 
AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10 +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L +#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h index 312c50ea30f3..f268d33c4744 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h @@ -436,6 +436,8 @@ #define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regDCCG_GATE_DISABLE_CNTL3 0x005a #define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2 +#define regHDMISTREAMCLK0_DTO_PARAM 0x005b +#define regHDMISTREAMCLK0_DTO_PARAM_BASE_IDX 2 #define regDCCG_AUDIO_DTBCLK_DTO_PHASE 0x0061 #define regDCCG_AUDIO_DTBCLK_DTO_PHASE_BASE_IDX 2 #define regDCCG_AUDIO_DTBCLK_DTO_MODULO 0x0062 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h index a9d553ef26c0..1f21f313bd1d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h @@ -1438,6 +1438,14 @@ #define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE_MASK 0x00200000L #define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE_MASK 0x00400000L #define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE_MASK 0x00800000L +//HDMISTREAMCLK0_DTO_PARAM +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT 0x0 +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT 0x8 +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN__SHIFT 0x10 +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK 0x000000FFL +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK 0x0000FF00L +#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK 0x00010000L + //DCCG_AUDIO_DTBCLK_DTO_PHASE #define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE__SHIFT 0x0 #define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE_MASK 0xFFFFFFFFL diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h index bd37aa6b6560..b4b2584bbd66 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h @@ -77,4 +77,9 @@ #define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL #define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL +#define mmDF_CS_UMC_AON0_HardwareAssertMaskLow 0x067e +#define mmDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 0 +#define mmDF_NCS_PG0_HardwareAssertMaskHigh 0x067f +#define mmDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 0 + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h index f804e13b002e..f45ec6f97ff2 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h +++ 
b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h @@ -62,4 +62,136 @@ #define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L #define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L +//DF_CS_UMC_AON0_HardwareAssertMaskLow +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19 +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L +#define 
DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L +#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L + +//DF_NCS_PG0_HardwareAssertMaskHigh +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19 +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b +#define 
DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L +#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h new file mode 100755 index 000000000000..3c2f270fb3bb --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _dpcs_2_0_3_OFFSET_HEADER +#define _dpcs_2_0_3_OFFSET_HEADER +// addressBlock: dpcssysa_dpcs0_dpcstx0_dispdec +// base address: 0x0 +#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928 +#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929 +#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a +#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b +#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d +#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 + + +// addressBlock: dpcssysa_dpcs0_rdpcstx0_dispdec +// base address: 0x0 +#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930 +#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931 +#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932 +#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933 +#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934 +#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935 +#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2939 +#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a +#define 
mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e +#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2 + + +// addressBlock: dpcssysa_dpcs0_dpcstx1_dispdec +// base address: 0x360 +#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00 +#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01 +#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02 +#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03 +#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05 +#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 + + +// addressBlock: dpcssysa_dpcs0_rdpcstx1_dispdec +// base address: 0x360 +#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08 +#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09 +#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a +#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b +#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 +#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c +#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2 +#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d +#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11 +#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26 +#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h new file mode 100755 index 000000000000..a6d076530117 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h @@ -0,0 +1,952 @@ 
+/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _dpcs_2_0_3_SH_MASK_HEADER +#define _dpcs_2_0_3_SH_MASK_HEADER +// addressBlock: dpcssysa_dpcs0_dpcstx0_dispdec +//DPCSTX0_DPCSTX_TX_CLOCK_CNTL +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L +#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L +//DPCSTX0_DPCSTX_TX_CNTL +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L +//DPCSTX0_DPCSTX_CBUS_CNTL +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL +#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L +//DPCSTX0_DPCSTX_INTERRUPT_CNTL +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 +#define 
DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L +#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L +//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR +#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 +#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL +//DPCSTX0_DPCSTX_PLL_UPDATE_DATA +#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL +//DPCSTX0_DPCSTX_DEBUG_CONFIG +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18 +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L +//DPCSTX0_DPCSTX_TEST_DEBUG_DATA +#define DPCSTX0_DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA__SHIFT 0x0 +#define DPCSTX0_DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL + + +// addressBlock: dpcssysa_dpcs0_rdpcstx0_dispdec +//RDPCSTX0_RDPCSTX_CNTL +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 
0x11 +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L +//RDPCSTX0_RDPCSTX_CLOCK_CNTL +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L +#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L +//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa +#define 
RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L +#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L +//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA +#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L +//RDPCSTX0_RDPCS_TX_CR_ADDR +#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 +#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL +//RDPCSTX0_RDPCS_TX_CR_DATA +#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 +#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL +//RDPCSTX0_RDPCSTX_SCRATCH +#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL +//RDPCSTX0_RDPCSTX_PHY_CNTL0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d 
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL1 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L +#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L +//RDPCSTX0_RDPCSTX_PHY_CNTL2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x1 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb +#define 
RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_MASK 0x00000002L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00000004L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L +#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L +//RDPCSTX0_RDPCSTX_PHY_CNTL3 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L +#define 
RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L +#define 
RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL5 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x5 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0xa +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0xb +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x15 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x1a +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0x1b +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0x1c +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x0000000CL +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x000000E0L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000300L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000400L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000800L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00001000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x000C0000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00100000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00E00000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x03000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x04000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x08000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x10000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL6 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x5 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0xb +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x15 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0x1a +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1b +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1c +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x0000000CL +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD_MASK 0x00000010L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE_MASK 0x000000E0L +#define 
RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00000300L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00000800L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00001000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x000C0000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD_MASK 0x00100000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE_MASK 0x00E00000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x03000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x04000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x08000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x10000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL7 +#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 +#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL +#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L +//RDPCSTX0_RDPCSTX_PHY_CNTL8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL +//RDPCSTX0_RDPCSTX_PHY_CNTL9 +#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL +#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL10 +#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL +//RDPCSTX0_RDPCSTX_PHY_CNTL11 +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x1 +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00000002L +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL12 +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L +#define 
RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L +#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L +//RDPCSTX0_RDPCSTX_PHY_CNTL13 +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L +//RDPCSTX0_RDPCSTX_PHY_CNTL14 +#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c +#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L +#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L +#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L +//RDPCSTX0_RDPCSTX_PHY_FUSE0 +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L +#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L +//RDPCSTX0_RDPCSTX_PHY_FUSE1 +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L +#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L +//RDPCSTX0_RDPCSTX_PHY_FUSE2 +#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L +#define 
RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L +//RDPCSTX0_RDPCSTX_PHY_FUSE3 +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L +#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L +//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL +#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 +#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 +#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL +#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L + + +// addressBlock: dpcssysa_dpcssys_cr0_dispdec +//DPCSSYS_CR0_DPCSSYS_CR_ADDR +#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 +#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL +//DPCSSYS_CR0_DPCSSYS_CR_DATA +#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 +#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL + + +// addressBlock: dpcssysa_dpcs0_dpcstx1_dispdec +//DPCSTX1_DPCSTX_TX_CLOCK_CNTL +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L +#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L +//DPCSTX1_DPCSTX_TX_CNTL +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L +//DPCSTX1_DPCSTX_CBUS_CNTL +#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 +#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 
0x1f +#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL +#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L +//DPCSTX1_DPCSTX_INTERRUPT_CNTL +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L +#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L +//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR +#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 +#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL +//DPCSTX1_DPCSTX_PLL_UPDATE_DATA +#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL +// addressBlock: dpcssysa_dpcs0_rdpcstx1_dispdec +//RDPCSTX1_RDPCSTX_CNTL +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L +#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L +//RDPCSTX1_RDPCSTX_CLOCK_CNTL +#define 
RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L +#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L +//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 
0x00000040L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L +#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L +//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA +#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L +//RDPCSTX1_RDPCS_TX_CR_ADDR +#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 +#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL +//RDPCSTX1_RDPCS_TX_CR_DATA +#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 +#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL +//RDPCSTX1_RDPCS_TX_SRAM_CNTL +#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 +#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 +#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c +#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L +#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L +#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L +//RDPCSTX1_RDPCSTX_SCRATCH +#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL +//RDPCSTX1_RDPCSTX_PHY_CNTL0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 
0x00000030L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL1 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L +#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L +//RDPCSTX1_RDPCSTX_PHY_CNTL2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x1 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_MASK 0x00000002L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L +#define 
RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L +//RDPCSTX1_RDPCSTX_PHY_CNTL3 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L +#define 
RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL5 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x8 
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0xb +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x15 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x1a +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0x1b +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x0000000CL +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x000000E0L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000300L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000800L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x000C0000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00100000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00E00000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x03000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x04000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x08000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x10000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL6 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x5 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0xb +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0xc +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0x12 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x15 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0x1a +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1b +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x0000000CL +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD_MASK 0x00000010L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE_MASK 0x000000E0L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00000300L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00000800L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00001000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x000C0000L +#define 
RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD_MASK 0x00100000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE_MASK 0x00E00000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x03000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x04000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x08000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x10000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL7 +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL +#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L +//RDPCSTX1_RDPCSTX_PHY_CNTL8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL +//RDPCSTX1_RDPCSTX_PHY_CNTL9 +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL +#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL +//RDPCSTX1_RDPCSTX_PHY_CNTL11 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x1 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00000002L +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL12 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L +#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L +//RDPCSTX1_RDPCSTX_PHY_CNTL13 +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 +#define 
RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L +//RDPCSTX1_RDPCSTX_PHY_CNTL14 +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L +#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h index 92caf8441d1e..01a56556cde1 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h @@ -11932,5 +11932,32 @@ #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_RX_OVRD_OUT_2 0xe0c7 #define ixDPCSSYS_CR4_RAWLANEX_DIG_PCS_XF_TX_OVRD_IN_2 0xe0c8 +//RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L +#define RDPCSPIPE0_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L + +//RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L +#define RDPCSPIPE1_RDPCSPIPE_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L + +//[Note] Hack. RDPCSPIPE only has 2 instances. +#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6 0x2d73 +#define regRDPCSPIPE0_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6 0x2e4b +#define regRDPCSPIPE1_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6 0x2d73 +#define regRDPCSPIPE2_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6 0x2e4b +#define regRDPCSPIPE3_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 +#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6 0x2d73 +#define regRDPCSPIPE4_RDPCSPIPE_PHY_CNTL6_BASE_IDX 2 #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h new file mode 100644 index 000000000000..b7d3d0df3260 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h @@ -0,0 +1,355 @@ +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _mp_11_0_8_SH_MASK_HEADER +#define _mp_11_0_8_SH_MASK_HEADER + +#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL +#define 
MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 +#define 
MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 +#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL +#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 +#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L +#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 +#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L +#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 +#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L +#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 +#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L +#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 +#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL +#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0 +#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L +#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8 +#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 +#define 
MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL +#define 
MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 +#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL +#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0 +#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL +#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 +#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL +#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0 +#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L +#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 +#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L +#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 +#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L +#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 +#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L +#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 +#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL +#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0 +#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L +#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8 + 
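(Editor's note on the C2PMSG scratch registers defined above: they form the driver/SMU mailbox. On SMU v11 parts the driver conventionally writes the argument to C2PMSG_82 and the message index to C2PMSG_66, then polls C2PMSG_90 for a nonzero result; the mm* offsets live in the companion mp_*_offset.h header. A hedged sketch of that flow — the helper name and timeout are illustrative and error handling is elided; the real logic lives in the driver's smu_cmn code:)

static int example_send_smu_msg(struct amdgpu_device *adev, u16 msg, u32 arg)
{
	const int timeout = 100000;
	int i;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);	/* clear stale response */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, arg);	/* message argument */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);	/* index write fires the request */

	for (i = 0; i < timeout; i++) {
		u32 resp = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if (resp)				/* 0x1 == PPSMC_Result_OK */
			return resp == 0x1 ? 0 : -EIO;
		udelay(10);
	}

	return -ETIME;
}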
+#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h index 6d0052ce6bed..da6d380c948b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h @@ -354,5 +354,12 @@ #define mmMP1_SMN_EXT_SCRATCH7 0x03c7 #define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0 +/* + * addressBlock: mp_SmuMp1Pub_MmuDec + * base address: 0x0 + */ +#define smnMP1_PMI_3_START 0x3030204 +#define smnMP1_PMI_3_FIFO 0x3030208 +#define smnMP1_PMI_3 0x3030600 #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h index 136fb5de6a4c..a5ae2a801254 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h @@ -959,5 +959,17 @@ #define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 #define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL +// MP1_PMI_3_START +#define MP1_PMI_3_START__ENABLE_MASK 0x80000000L +// MP1_PMI_3_FIFO +#define MP1_PMI_3_FIFO__DEPTH_MASK 0x00000fffL + +// MP1_PMI_3_START +#define MP1_PMI_3_START__ENABLE__SHIFT 0x0000001f +// MP1_PMI_3_FIFO +#define MP1_PMI_3_FIFO__DEPTH__SHIFT 0x00000000 + + + #endif diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 6a505d1b82a5..da895d1f3b4f 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h @@ -7148,7 +7148,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F) #define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F) #else // not __cplusplus -#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT)) +#define GetIndexIntoMasterTable(MasterOrData, FieldName) (offsetof(ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES, FieldName) / sizeof(USHORT)) #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F) #define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F) diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 44955458fe38..7bd763361d6e 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -768,6 +768,10 @@ enum atom_encoder_caps_def ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not. ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board. + ATOM_ENCODER_CAP_RECORD_DP2 =0x10, // DP2 is supported by ASIC/board. + ATOM_ENCODER_CAP_RECORD_UHBR10_EN =0x20, // DP2.0 UHBR10 settings is supported by board + ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN =0x40, // DP2.0 UHBR13.5 settings is supported by board + ATOM_ENCODER_CAP_RECORD_UHBR20_EN =0x80, // DP2.0 UHBR20 settings is supported by board ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type. 
}; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index c84bd7b2cf59..ac941f62cbed 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -33,12 +33,11 @@ #include <linux/dma-fence.h> struct pci_dev; +struct amdgpu_device; #define KGD_MAX_QUEUES 128 struct kfd_dev; -struct kgd_dev; - struct kgd_mem; enum kfd_preempt_type { @@ -228,61 +227,61 @@ struct tile_config { */ struct kfd2kgd_calls { /* Register access functions */ - void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, + void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); - int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, u32 pasid, + int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid, unsigned int vmid); - int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id); + int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id); - int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); - int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd, + int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off); - int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd, + int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); - int (*hqd_dump)(struct kgd_dev *kgd, + int (*hqd_dump)(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); - int (*hqd_sdma_dump)(struct kgd_dev *kgd, + int (*hqd_sdma_dump)(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); - bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); - - int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type, - unsigned int timeout, uint32_t pipe_id, + bool (*hqd_is_occupied)(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); - bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd); + int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd, + uint32_t reset_type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id); + + bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); - int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd, + int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd, unsigned int timeout); - int (*address_watch_disable)(struct kgd_dev *kgd); - int (*address_watch_execute)(struct kgd_dev *kgd, + int (*address_watch_disable)(struct amdgpu_device *adev); + int (*address_watch_execute)(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo); - int (*wave_control_execute)(struct kgd_dev *kgd, + int (*wave_control_execute)(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); - uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd, + uint32_t (*address_watch_get_offset)(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset); - bool (*get_atc_vmid_pasid_mapping_info)( - struct kgd_dev *kgd, + bool (*get_atc_vmid_pasid_mapping_info)(struct 
amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); @@ -290,16 +289,16 @@ struct kfd2kgd_calls { * passed to the shader by the CP. It's the user mode driver's * responsibility. */ - void (*set_scratch_backing_va)(struct kgd_dev *kgd, + void (*set_scratch_backing_va)(struct amdgpu_device *adev, uint64_t va, uint32_t vmid); - void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, + void (*set_vm_context_page_table_base)(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); - uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); + uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev); - void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt, - int *max_waves_per_cu); - void (*program_trap_handler_settings)(struct kgd_dev *kgd, + void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, + int *wave_cnt, int *max_waves_per_cu); + void (*program_trap_handler_settings)(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); }; diff --git a/drivers/gpu/drm/amd/include/soc15_hw_ip.h b/drivers/gpu/drm/amd/include/soc15_hw_ip.h index 45ca4c921a66..c1519d20596a 100644 --- a/drivers/gpu/drm/amd/include/soc15_hw_ip.h +++ b/drivers/gpu/drm/amd/include/soc15_hw_ip.h @@ -80,6 +80,8 @@ #define L1IMU15_HWID 65 #define WAFLC_HWID 66 #define FCH_USB_PD_HWID 67 +#define SDMA2_HWID 68 +#define SDMA3_HWID 69 #define PCIE_HWID 70 #define PCS_HWID 80 #define DDCL_HWID 89 diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 03581d5b1836..08362d506534 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block { int ret = 0; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; + + if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { + dev_dbg(adev->dev, "IP block%d already in the target %s state!", + block_type, gate ? 
"gate" : "ungate"); + return 0; + } switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: @@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block break; } + if (!ret) + atomic_set(&adev->pm.pwr_state[block_type], pwr_state); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 249cb0aeb5ae..49df4c20f09e 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -310,7 +310,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_dpm_forced_level level; - enum amd_dpm_forced_level current_level = 0xff; + enum amd_dpm_forced_level current_level; int ret = 0; if (amdgpu_in_reset(adev)) @@ -350,6 +350,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, if (pp_funcs->get_performance_level) current_level = amdgpu_dpm_get_performance_level(adev); + else + current_level = adev->pm.dpm.forced_level; if (current_level == level) { pm_runtime_mark_last_busy(ddev->dev); @@ -2019,15 +2021,15 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC, .attr_update = ss_power_attr_update), AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC, @@ -2087,10 +2089,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (asic_type < CHIP_VEGA12) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { - if (!(asic_type == CHIP_VANGOGH)) + if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { - if (!(asic_type == CHIP_VANGOGH)) + if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { + if (!adev->powerplay.pp_funcs->get_power_profile_mode || + amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; } @@ -3753,5 +3759,7 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) adev, &amdgpu_debugfs_pm_prv_buffer_fops, adev->pm.smu_prv_buffer_size); + + 
amdgpu_smu_stb_debug_fs_init(adev); #endif } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 98f1b3d8c1d5..16e3f72d31b9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -417,6 +417,12 @@ struct amdgpu_dpm { enum amd_dpm_forced_level forced_level; }; +enum ip_power_state { + POWER_STATE_UNKNOWN, + POWER_STATE_ON, + POWER_STATE_OFF, +}; + struct amdgpu_pm { struct mutex mutex; u32 current_sclk; @@ -452,6 +458,8 @@ struct amdgpu_pm { struct i2c_adapter smu_i2c; struct mutex smu_i2c_mutex; struct list_head pm_attr_list; + + atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 8156729c370b..f738f7dc20c9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -324,6 +324,7 @@ enum smu_table_id SMU_TABLE_OVERDRIVE, SMU_TABLE_I2C_COMMANDS, SMU_TABLE_PACE, + SMU_TABLE_ECCINFO, SMU_TABLE_COUNT, }; @@ -340,6 +341,7 @@ struct smu_table_context void *max_sustainable_clocks; struct smu_bios_boot_up_values boot_values; void *driver_pptable; + void *ecc_table; struct smu_table tables[SMU_TABLE_COUNT]; /* * The driver table is just a staging buffer for @@ -472,6 +474,12 @@ struct cmn2asic_mapping { int map_to; }; +struct stb_context { + uint32_t stb_buf_size; + bool enabled; + spinlock_t lock; +}; + #define WORKLOAD_POLICY_MAX 7 struct smu_context { @@ -559,6 +567,8 @@ struct smu_context uint16_t cpu_core_num; struct smu_user_dpm_profile user_dpm_profile; + + struct stb_context stb_context; }; struct i2c_adapter; @@ -1008,7 +1018,9 @@ struct pptable_funcs { /** * @set_power_limit: Set power limit in watts. */ - int (*set_power_limit)(struct smu_context *smu, uint32_t n); + int (*set_power_limit)(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); /** * @init_max_sustainable_clocks: Populate max sustainable clock speed @@ -1259,6 +1271,17 @@ struct pptable_funcs { * of SMUBUS table. */ int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size); + + /** + * @get_ecc_info: message SMU to get ECC INFO table. + */ + ssize_t (*get_ecc_info)(struct smu_context *smu, void *table); + + + /** + * @stb_collect_info: Collects Smart Trace Buffers data. 
+ */ + int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size); }; typedef enum { @@ -1395,6 +1418,9 @@ int smu_set_light_sbr(struct smu_context *smu, bool enable); int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, uint64_t event_arg); +int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc); +int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size); +void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h index 8a08ecc34c69..4884a4e1f261 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h @@ -33,63 +33,47 @@ #define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging #define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible #define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible -#define TABLE_COUNT 6 +#define TABLE_SMU_METRICS 6 // Called by Driver +#define TABLE_COUNT 7 -#define NUM_DSPCLK_LEVELS 8 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 4 -#define NUM_FCLK_DPM_LEVELS 4 -#define NUM_MEMCLK_DPM_LEVELS 4 +typedef struct SmuMetricsTable_t { + //CPU status + uint16_t CoreFrequency[6]; //[MHz] + uint32_t CorePower[6]; //[mW] + uint16_t CoreTemperature[6]; //[centi-Celsius] + uint16_t L3Frequency[2]; //[MHz] + uint16_t L3Temperature[2]; //[centi-Celsius] + uint16_t C0Residency[6]; //Percentage -#define NUMBER_OF_PSTATES 8 -#define NUMBER_OF_CORES 8 + // GFX status + uint16_t GfxclkFrequency; //[MHz] + uint16_t GfxTemperature; //[centi-Celsius] -typedef enum { - S3_TYPE_ENTRY, - S5_TYPE_ENTRY, -} Sleep_Type_e; + // SOC IP info + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] -typedef enum { - GFX_OFF = 0, - GFX_ON = 1, -} GFX_Mode_e; + // power, VF info for CPU/GFX telemetry rails, and then socket power total + uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX + uint32_t CurrentSocketPower; //[mW] -typedef enum { - CPU_P0 = 0, - CPU_P1, - CPU_P2, - CPU_P3, - CPU_P4, - CPU_P5, - CPU_P6, - CPU_P7 -} CPU_PState_e; + uint16_t SocTemperature; //[centi-Celsius] + uint16_t EdgeTemperature; + uint16_t ThrottlerStatus; + uint16_t Spare; -typedef enum { - CPU_CORE0 = 0, - CPU_CORE1, - CPU_CORE2, - CPU_CORE3, - CPU_CORE4, - CPU_CORE5, - CPU_CORE6, - CPU_CORE7 -} CORE_ID_e; +} SmuMetricsTable_t; -typedef enum { - DF_DPM0 = 0, - DF_DPM1, - DF_DPM2, - DF_DPM3, - DF_PState_Count -} DF_PState_e; - -typedef enum { - GFX_DPM0 = 0, - GFX_DPM1, - GFX_DPM2, - GFX_DPM3, - GFX_PState_Count -} GFX_PState_e; +typedef struct SmuMetrics_t { + SmuMetricsTable_t Current; + SmuMetricsTable_t Average; + uint32_t SampleStartTime; + uint32_t SampleStopTime; + uint32_t Accnt; +} SmuMetrics_t; #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h index a017983ff1fa..0f67c56c2863 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h +++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h @@ -140,6 +140,8 @@ #define MAX_SW_I2C_COMMANDS 24 +#define ALDEBARAN_UMC_CHANNEL_NUM 32 + typedef enum { 
I2C_CONTROLLER_PORT_0, //CKSVII2C0 I2C_CONTROLLER_PORT_1, //CKSVII2C1 @@ -507,6 +509,19 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } AvfsDebugTable_t; +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM]; +} EccInfoTable_t; + // These defines are used with the following messages: // SMC_MSG_TransferTableDram2Smu // SMC_MSG_TransferTableSmu2Dram @@ -517,6 +532,7 @@ typedef struct { #define TABLE_SMU_METRICS 4 #define TABLE_DRIVER_SMU_CONFIG 5 #define TABLE_I2C_COMMANDS 6 -#define TABLE_COUNT 7 +#define TABLE_ECCINFO 7 +#define TABLE_COUNT 8 #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h index 6f1b1b50d527..18b862a90fbe 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h @@ -226,7 +226,10 @@ __SMU_DUMMY_MAP(SetUclkDpmMode), \ __SMU_DUMMY_MAP(LightSBR), \ __SMU_DUMMY_MAP(GfxDriverResetRecovery), \ - __SMU_DUMMY_MAP(BoardPowerCalibration), + __SMU_DUMMY_MAP(BoardPowerCalibration), \ + __SMU_DUMMY_MAP(RequestGfxclk), \ + __SMU_DUMMY_MAP(ForceGfxVid), \ + __SMU_DUMMY_MAP(UnforceGfxVid), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index cbdae8a2c698..2d422e6a9feb 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -197,7 +197,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu); int smu_v11_0_get_current_power_limit(struct smu_context *smu, uint32_t *power_limit); -int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n); +int smu_v11_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h index 6e6088760b18..909a86aa60f3 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h @@ -65,6 +65,13 @@ #define PPSMC_MSG_SetDriverTableVMID 0x34 #define PPSMC_MSG_SetSoftMinCclk 0x35 #define PPSMC_MSG_SetSoftMaxCclk 0x36 -#define PPSMC_Message_Count 0x37 +#define PPSMC_MSG_GetGfxFrequency 0x37 +#define PPSMC_MSG_GetGfxVid 0x38 +#define PPSMC_MSG_ForceGfxFreq 0x39 +#define PPSMC_MSG_UnForceGfxFreq 0x3A +#define PPSMC_MSG_ForceGfxVid 0x3B +#define PPSMC_MSG_UnforceGfxVid 0x3C +#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D +#define PPSMC_Message_Count 0x3E #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index dc91eb608791..44af23ae059e 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -27,7 +27,9 @@ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 -#define SMU13_DRIVER_IF_VERSION_ALDE 0x07 +#define SMU13_DRIVER_IF_VERSION_ALDE 0x08 + +#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms /* MP Apertures */ #define MP0_Public 0x03800000 @@ -163,7 +165,9 @@ int smu_v13_0_notify_display_change(struct smu_context *smu); int smu_v13_0_get_current_power_limit(struct smu_context *smu, uint32_t *power_limit); -int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n); +int smu_v13_0_set_power_limit(struct smu_context 
*smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu); @@ -214,7 +218,6 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); -int smu_v13_0_mode1_reset(struct smu_context *smu); int smu_v13_0_mode2_reset(struct smu_context *smu); int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h index 1d3447991d0c..fc9198846e70 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h @@ -51,7 +51,7 @@ #define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default #define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Set active work load type +#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Deprecated (Not to be used) #define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. enter GFXOFF #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer @@ -63,7 +63,7 @@ #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) -#define PPSMC_MSG_SPARE0 0x16 ///< Spared +#define PPSMC_MSG_SPARE 0x16 ///< Spare #define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency #define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency #define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 321215003643..20cb234d5061 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -875,34 +875,30 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx) static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; + int ret; - if (!hwmgr || !hwmgr->pm_en || !buf) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode) + return -EOPNOTSUPP; + if (!buf) return -EINVAL; - if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return snprintf(buf, PAGE_SIZE, "\n"); - } - - return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); + mutex_unlock(&hwmgr->smu_lock); + return ret; } static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = -EINVAL; + int ret = -EOPNOTSUPP; - if (!hwmgr || !hwmgr->pm_en) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode) return ret; - if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return ret; - } - if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { pr_debug("power profile setting is for manual dpm mode only.\n"); - 
return ret; + return -EINVAL; } mutex_lock(&hwmgr->smu_lock); @@ -1555,7 +1551,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks) static int pp_asic_reset_mode_2(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; + int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h index b7e2651b570b..2fc1733bcdcf 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h @@ -29,9 +29,9 @@ typedef enum atom_smu9_syspll0_clock_id BIOS_CLKID; #define GetIndexIntoMasterCmdTable(FieldName) \ - (((char*)(&((struct atom_master_list_of_command_functions_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t)) + (offsetof(struct atom_master_list_of_command_functions_v2_1, FieldName) / sizeof(uint16_t)) #define GetIndexIntoMasterDataTable(FieldName) \ - (((char*)(&((struct atom_master_list_of_data_tables_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t)) + (offsetof(struct atom_master_list_of_data_tables_v2_1, FieldName) / sizeof(uint16_t)) #define PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES 32 diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 1de3ae77e03e..1f406f21b452 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1036,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, else i = 1; - size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + size += sprintf(buf + size, "0: %uMhz %s\n", data->gfx_min_freq_limit/100, i == 0 ? "*" : ""); - size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + size += sprintf(buf + size, "1: %uMhz %s\n", i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, i == 1 ? "*" : ""); - size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", + size += sprintf(buf + size, "2: %uMhz %s\n", data->gfx_max_freq_limit/100, i == 2 ? "*" : ""); break; @@ -1050,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->entries[i].clk / 100, ((mclk_table->entries[i].clk / 100) @@ -1065,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", (data->gfx_actual_soft_max_freq > 0) ? 
data->gfx_actual_soft_max_freq : max_freq); } break; @@ -1081,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", min_freq, max_freq); } break; @@ -1456,6 +1456,8 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index e7803ce8f67a..611969bf4520 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4926,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4941,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4955,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < pcie_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s\n", i, (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : (pcie_table->dpm_levels[i].value == 2) ? 
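/*
 * Why "size = sysfs_emit(buf, ...)" turns into
 * "size += sprintf(buf + size, ...)" throughout these print helpers:
 * sysfs_emit()/sysfs_emit_at() WARN and write nothing unless buf sits
 * exactly at a page boundary, yet the pm sysfs code can call
 * print_clock_levels() with a pointer already advanced into the page
 * (appending OD_SCLK, OD_MCLK, OD_RANGE sections back to back).
 * Assignments that reset `size` would also discard the sections
 * emitted before them, so the conversion both drops the alignment
 * requirement (sprintf has none) and makes every write accumulate.
 */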
"8.0GT/s, x16" : "", @@ -4963,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_sclk_table->entries[i].clock/100, odn_sclk_table->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_mclk_table->entries[i].clock/100, odn_mclk_table->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5518,6 +5518,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index b94a77e4e714..03bf8f069222 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1559,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_SCLK_INDEX); for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->entries[i].clk / 100, (i == now) ? "*" : ""); break; @@ -1571,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_MCLK_INDEX); for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h index ad33983a8064..2a75da1e9f03 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h @@ -109,6 +109,19 @@ int phm_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. 
+ */ +static inline void phm_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c152a61ddd2c..e6336654c565 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -4548,6 +4548,8 @@ static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -4650,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, else count = sclk_table->count; for (i = 0; i < count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4661,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4672,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); for (i = 0; i < soc_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, soc_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4684,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); for (i = 0; i < dcef_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, dcef_table->dpm_levels[i].value / 100, (dcef_table->dpm_levels[i].value / 100 == now) ? "*" : ""); @@ -4698,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? 
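/*
 * Usage sketch for phm_get_sysfs_buf() above: call sites that keep
 * sysfs_emit_at() rewind buf to its page start and seed the offset, so
 * the alignment check inside sysfs_emit_at() passes while output still
 * lands at the original position. Assuming a possibly mid-page pointer:
 *
 *	int size = 0;
 *
 *	phm_get_sysfs_buf(&buf, &size);	// buf -> page start,
 *					// size -> old offset_in_page(buf)
 *	size += sysfs_emit_at(buf, size, "header\n");
 */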
"8.0GT/s," : @@ -4717,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk / 100, podn_vdd_dep->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk/100, podn_vdd_dep->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mem_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } @@ -5112,6 +5114,8 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%s %16s %s %s %s %s\n",title[0], title[1], title[2], title[3], title[4], title[5]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 8558718e15a8..a2f4d6773d45 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2141,6 +2141,8 @@ static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -2256,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get gfx clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; @@ -2272,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get memory clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? 
"*" : ""); break; @@ -2290,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get soc clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; @@ -2308,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get dcef clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 0cf39c1244b1..85d55ab4e369 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3238,6 +3238,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) int ret = 0; int size = 0; + phm_get_sysfs_buf(&buf, &size); + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); PP_ASSERT_WITH_CODE(!ret, "[EnableAllSmuFeatures] Failed to get enabled smc features!", @@ -3372,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_sclks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3390,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_memclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3408,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_socclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3426,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); for (i = 0; i < fclk_dpm_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, fclk_dpm_table->dpm_levels[i].value, fclk_dpm_table->dpm_levels[i].value == (now / 100) ? 
"*" : ""); break; @@ -3438,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_dcefclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3458,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? "8.0GT/s," : @@ -3479,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", od_table->GfxclkFmin); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", od_table->GfxclkFmax); } break; case OD_MCLK: if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_MCLK"); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "1: %10uMhz\n", od_table->UclkFmax); } @@ -3503,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size = sysfs_emit(buf, "%s:\n", "OD_VDDC_CURVE"); - size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", + size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); + size += sprintf(buf + size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n", + size += sprintf(buf + size, "1: %10uMhz %10dmV\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n", + size += sprintf(buf + size, "2: %10uMhz %10dmV\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3 / VOLTAGE_SCALE); } @@ -3518,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3539,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, 
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); } @@ -4003,6 +4005,8 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) if (!buf) return -EINVAL; + phm_get_sysfs_buf(&buf, &size); + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s %s\n", title[0], title[1], title[2], title[3], title[4], title[5], title[6], title[7], title[8], title[9], title[10]); diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index bdbbeb959c68..81f82aa05ec2 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -6867,6 +6867,8 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); + ni_update_current_ps(adev, boot_ps); + return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 3ab1ce4d3419..edcf2738748a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -455,7 +455,11 @@ static int smu_get_power_num_states(void *handle, bool is_support_sw_smu(struct amdgpu_device *adev) { - if (adev->asic_type >= CHIP_ARCTURUS) + /* vega20 is 11.0.2, but it's supported via the powerplay code */ + if (adev->asic_type == CHIP_VEGA20) + return false; + + if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0)) return true; return false; @@ -575,41 +579,43 @@ static int smu_set_funcs(struct amdgpu_device *adev) if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) smu->od_enabled = true; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): navi10_set_ppt_funcs(smu); break; - case CHIP_ARCTURUS: - adev->pm.pp_feature &= 
~PP_GFXOFF_MASK; - arcturus_set_ppt_funcs(smu); - /* OD is not supported on Arcturus */ - smu->od_enabled =false; - break; - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): sienna_cichlid_set_ppt_funcs(smu); break; - case CHIP_ALDEBARAN: - aldebaran_set_ppt_funcs(smu); - /* Enable pp_od_clk_voltage node */ - smu->od_enabled = true; - break; - case CHIP_RENOIR: + case IP_VERSION(12, 0, 0): + case IP_VERSION(12, 0, 1): renoir_set_ppt_funcs(smu); break; - case CHIP_VANGOGH: + case IP_VERSION(11, 5, 0): vangogh_set_ppt_funcs(smu); break; - case CHIP_YELLOW_CARP: + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): yellow_carp_set_ppt_funcs(smu); break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(11, 0, 8): cyan_skillfish_set_ppt_funcs(smu); break; + case IP_VERSION(11, 0, 2): + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + arcturus_set_ppt_funcs(smu); + /* OD is not supported on Arcturus */ + smu->od_enabled =false; + break; + case IP_VERSION(13, 0, 2): + aldebaran_set_ppt_funcs(smu); + /* Enable pp_od_clk_voltage node */ + smu->od_enabled = true; + break; default: return -EINVAL; } @@ -694,7 +700,8 @@ static int smu_late_init(void *handle) return ret; } - if (adev->asic_type == CHIP_YELLOW_CARP) + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3))) return 0; if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { @@ -1140,9 +1147,18 @@ static int smu_smc_hw_setup(struct smu_context *smu) if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); /* this is needed specifically */ - if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && - (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 0, 12): ret = smu_system_features_control(smu, true); + if (ret) + dev_err(adev->dev, "Failed system features control!\n"); + break; + default: + break; + } return ret; } @@ -1263,8 +1279,10 @@ static int smu_smc_hw_setup(struct smu_context *smu) } ret = smu_notify_display_change(smu); - if (ret) + if (ret) { + dev_err(adev->dev, "Failed to notify display change!\n"); return ret; + } /* * Set min deep sleep dce fclk with bootup value from vbios via @@ -1272,8 +1290,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) */ ret = smu_set_min_dcef_deep_sleep(smu, smu->smu_table.boot_values.dcefclk / 100); - if (ret) - return ret; return ret; } @@ -1284,7 +1300,7 @@ static int smu_start_smc_engine(struct smu_context *smu) int ret = 0; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - if (adev->asic_type < CHIP_NAVI10) { + if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) { if (smu->ppt_funcs->load_microcode) { ret = smu->ppt_funcs->load_microcode(smu); if (ret) @@ -1402,23 +1418,41 @@ static int smu_disable_dpms(struct smu_context *smu) * - SMU firmware can handle the DPM reenablement * properly. 
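/*
 * The CHIP_* checks above become IP-discovery version checks. Each IP
 * block reports a (major, minor, revision) triple, packed so that
 * ordered comparisons keep working:
 *
 *	#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
 *
 * e.g. IP_VERSION(11, 0, 7) == 0x0b0007, which compares greater than
 * IP_VERSION(11, 0, 5) == 0x0b0005, so range tests such as
 * ">= IP_VERSION(11, 0, 0)" can replace ">= CHIP_NAVI10".
 */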
*/ - if (smu->uploading_custom_pp_table && - (adev->asic_type >= CHIP_NAVI10) && - (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) - return smu_disable_all_features_with_exception(smu, - true, - SMU_FEATURE_COUNT); + if (smu->uploading_custom_pp_table) { + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + return smu_disable_all_features_with_exception(smu, + true, + SMU_FEATURE_COUNT); + default: + break; + } + } /* * For Sienna_Cichlid, PMFW will handle the features disablement properly * on BACO in. Driver involvement is unnecessary. */ - if (((adev->asic_type == CHIP_SIENNA_CICHLID) || - ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) && - use_baco) - return smu_disable_all_features_with_exception(smu, - true, - SMU_FEATURE_BACO_BIT); + if (use_baco) { + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + return smu_disable_all_features_with_exception(smu, + true, + SMU_FEATURE_BACO_BIT); + default: + break; + } + } /* * For gpu reset, runpm and hibernation through BACO, @@ -1436,7 +1470,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->asic_type >= CHIP_NAVI10 && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -2229,6 +2263,7 @@ int smu_get_power_limit(void *handle, enum pp_power_type pp_power_type) { struct smu_context *smu = handle; + struct amdgpu_device *adev = smu->adev; enum smu_ppt_limit_level limit_level; uint32_t limit_type; int ret = 0; @@ -2272,15 +2307,20 @@ int smu_get_power_limit(void *handle, } else { switch (limit_level) { case SMU_PPT_LIMIT_CURRENT: - if ((smu->adev->asic_type == CHIP_ALDEBARAN) || - (smu->adev->asic_type == CHIP_SIENNA_CICHLID) || - (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) || - (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) || - (smu->adev->asic_type == CHIP_BEIGE_GOBY)) + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(13, 0, 2): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): ret = smu_get_asic_power_limits(smu, &smu->current_power_limit, NULL, NULL); + break; + default: + break; + } *limit = smu->current_power_limit; break; case SMU_PPT_LIMIT_DEFAULT: @@ -2310,9 +2350,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit) mutex_lock(&smu->mutex); + limit &= (1<<24)-1; if (limit_type != SMU_DEFAULT_PPT_LIMIT) if (smu->ppt_funcs->set_power_limit) { - ret = smu->ppt_funcs->set_power_limit(smu, limit); + ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); goto out; } @@ -2328,7 +2369,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit) limit = smu->current_power_limit; if (smu->ppt_funcs->set_power_limit) { - ret = smu->ppt_funcs->set_power_limit(smu, limit); + ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) smu->user_dpm_profile.power_limit = limit; } @@ -2495,13 +2536,15 @@ static int smu_get_power_profile_mode(void *handle, char *buf) struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + 
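/*
 * The "limit &= (1<<24)-1" added above strips an encoded selector:
 * callers pack the ppt limit type into the top byte of the 32-bit
 * argument and the wattage into the low 24 bits, roughly:
 *
 *	uint32_t limit_type  = limit >> 24;		// which ppt limit
 *	uint32_t limit_value = limit & ((1 << 24) - 1);	// value in watts
 *
 * which is also why the set_power_limit() callback now takes
 * limit_type as an explicit parameter.
 */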
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->get_power_profile_mode) return -EOPNOTSUPP; + if (!buf) + return -EINVAL; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_power_profile_mode) - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); mutex_unlock(&smu->mutex); @@ -2515,7 +2558,8 @@ static int smu_set_power_profile_mode(void *handle, struct smu_context *smu = handle; int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) + if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || + !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; mutex_lock(&smu->mutex); @@ -3030,6 +3074,20 @@ int smu_set_light_sbr(struct smu_context *smu, bool enable) return ret; } +int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) +{ + int ret = -EOPNOTSUPP; + + mutex_lock(&smu->mutex); + if (smu->ppt_funcs && + smu->ppt_funcs->get_ecc_info) + ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); + mutex_unlock(&smu->mutex); + + return ret; + +} + static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) { struct smu_context *smu = handle; @@ -3119,3 +3177,107 @@ int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, return ret; } + +int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) +{ + + if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) + return -EOPNOTSUPP; + + /* Confirm the buffer allocated is of correct size */ + if (size != smu->stb_context.stb_buf_size) + return -EINVAL; + + /* + * No need to lock smu mutex as we access STB directly through MMIO + * and not going through SMU messaging route (for now at least). + * For registers access rely on implementation internal locking. + */ + return smu->ppt_funcs->stb_collect_info(smu, buf, size); +} + +#if defined(CONFIG_DEBUG_FS) + +static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct smu_context *smu = &adev->smu; + unsigned char *buf; + int r; + + buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); + if (r) + goto out; + + filp->private_data = buf; + + return 0; + +out: + kvfree(buf); + return r; +} + +static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct smu_context *smu = &adev->smu; + + + if (!filp->private_data) + return -EINVAL; + + return simple_read_from_buffer(buf, + size, + pos, filp->private_data, + smu->stb_context.stb_buf_size); +} + +static int smu_stb_debugfs_release(struct inode *inode, struct file *filp) +{ + kvfree(filp->private_data); + filp->private_data = NULL; + + return 0; +} + +/* + * We have to define not only read method but also + * open and release because .read takes up to PAGE_SIZE + * data each time so and so is invoked multiple times. 
+ * We allocate the STB buffer in .open and release it + * in .release + */ +static const struct file_operations smu_stb_debugfs_fops = { + .owner = THIS_MODULE, + .open = smu_stb_debugfs_open, + .read = smu_stb_debugfs_read, + .release = smu_stb_debugfs_release, + .llseek = default_llseek, +}; + +#endif + +void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + + struct smu_context *smu = &adev->smu; + + if (!smu->stb_context.stb_buf_size) + return; + + debugfs_create_file_size("amdgpu_smu_stb_dump", + S_IRUSR, + adev_to_drm(adev)->primary->debugfs_root, + adev, + &smu_stb_debugfs_fops, + smu->stb_context.stb_buf_size); +#endif + +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index e343cc218990..fd1d30a93db5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -436,6 +436,19 @@ static void arcturus_check_bxco_support(struct smu_context *smu) } } +static void arcturus_check_fan_support(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + + /* No sort of fan control possible if PPTable has it disabled */ + smu->adev->pm.no_fan = + !(pptable->FeaturesToRun[0] & FEATURE_FAN_CONTROL_MASK); + if (smu->adev->pm.no_fan) + dev_info_once(smu->adev->dev, + "PMFW based fan control disabled"); +} + static int arcturus_check_powerplay_table(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -443,6 +456,7 @@ static int arcturus_check_powerplay_table(struct smu_context *smu) table_context->power_play_table; arcturus_check_bxco_support(smu); + arcturus_check_fan_support(smu); table_context->thermal_controller_type = powerplay_table->thermal_controller_type; @@ -771,8 +785,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu, struct smu_11_0_dpm_context *dpm_context = NULL; uint32_t gen_speed, lane_width; - if (amdgpu_ras_intr_triggered()) - return sysfs_emit(buf, "unavailable\n"); + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } dpm_context = smu_dpm->dpm_context; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index b05f9541accc..2238ee19c222 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -44,6 +44,28 @@ #undef pr_info #undef pr_debug +/* unit: MHz */ +#define CYAN_SKILLFISH_SCLK_MIN 1000 +#define CYAN_SKILLFISH_SCLK_MAX 2000 + +/* unit: mV */ +#define CYAN_SKILLFISH_VDDC_MIN 700 +#define CYAN_SKILLFISH_VDDC_MAX 1129 +#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe + +static struct gfx_user_settings { + uint32_t sclk; + uint32_t vddc; +} cyan_skillfish_user_settings; + +static uint32_t cyan_skillfish_sclk_default; + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ + FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \ + FEATURE_MASK(FEATURE_GFX_DPM_BIT)) + static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), @@ -52,14 +74,497 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(SetDriverDramAddrLow, 
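/*
 * On the smu_stb_debugfs_* handlers above: simple_read_from_buffer(to,
 * count, ppos, from, available) copies at most `count` bytes starting
 * at *ppos out of a kernel buffer, advances *ppos, and returns the
 * number copied (0 at EOF). Capturing the STB snapshot once in .open
 * therefore lets userspace page through one stable buffer, e.g. for a
 * 10 KiB snapshot read in 4 KiB chunks:
 *
 *	read #1 -> 4096 bytes, *ppos = 4096
 *	read #2 -> 4096 bytes, *ppos = 8192
 *	read #3 -> 2048 bytes; read #4 -> 0 (EOF)
 *
 * instead of re-sampling the hardware in the middle of a dump.
 */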
PPSMC_MSG_SetDriverTableDramAddrLow, 0), MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), + MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0), + MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0), + MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0), +}; + +static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = { + TAB_MAP_VALID(SMU_METRICS), }; +static int cyan_skillfish_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, + sizeof(SmuMetrics_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + goto err0_out; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + + smu_table->metrics_time = 0; + + return 0; + +err1_out: + smu_table->gpu_metrics_table_size = 0; + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; +} + +static int cyan_skillfish_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = cyan_skillfish_tables_init(smu); + if (ret) + return ret; + + return smu_v11_0_init_smc_tables(smu); +} + +static int cyan_skillfish_finit_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + kfree(smu_table->metrics_table); + smu_table->metrics_table = NULL; + + kfree(smu_table->gpu_metrics_table); + smu_table->gpu_metrics_table = NULL; + smu_table->gpu_metrics_table_size = 0; + + smu_table->metrics_time = 0; + + return 0; +} + +static int +cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->Current.GfxclkFrequency; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->Current.SocclkFrequency; + break; + case METRICS_CURR_VCLK: + *value = metrics->Current.VclkFrequency; + break; + case METRICS_CURR_DCLK: + *value = metrics->Current.DclkFrequency; + break; + case METRICS_CURR_UCLK: + *value = metrics->Current.MemclkFrequency; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->Current.CurrentSocketPower << 8) / + 1000; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->Current.GfxTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->Current.SocTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_VOLTAGE_VDDSOC: + *value = metrics->Current.Voltage[0]; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->Current.Voltage[1]; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->Current.ThrottlerStatus; + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int cyan_skillfish_read_sensor(struct smu_context *smu, + enum 
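/*
 * The socket-power conversion above, (CurrentSocketPower << 8) / 1000,
 * turns the firmware's milliwatt reading into the 8.8 fixed-point watt
 * format that the AMDGPU_PP_SENSOR_GPU_POWER consumers decode: e.g.
 * 15000 mW -> 15000 * 256 / 1000 = 3840 = 0x0f00, i.e. 15.0 W.
 */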
amd_pp_sensors sensor, + void *data, + uint32_t *size) +{ + int ret = 0; + + if (!data || !size) + return -EINVAL; + + mutex_lock(&smu->sensor_lock); + + switch (sensor) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_CURR_GFXCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_HOTSPOT, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDNB: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDSOC, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDGFX, + (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + mutex_unlock(&smu->sensor_lock); + + return ret; +} + +static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + member_type = METRICS_CURR_GFXCLK; + break; + case SMU_FCLK: + case SMU_MCLK: + member_type = METRICS_CURR_UCLK; + break; + case SMU_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case SMU_VCLK: + member_type = METRICS_CURR_VCLK; + break; + case SMU_DCLK: + member_type = METRICS_CURR_DCLK; + break; + default: + return -EINVAL; + } + + return cyan_skillfish_get_smu_metrics_data(smu, member_type, value); +} + +static int cyan_skillfish_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + char *buf) +{ + int ret = 0, size = 0; + uint32_t cur_value = 0; + int i; + + smu_cmn_get_sysfs_buf(&buf, &size); + + switch (clk_type) { + case SMU_OD_SCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); + break; + case SMU_OD_VDDC_CURVE: + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC"); + size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value); + break; + case SMU_OD_RANGE: + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + break; + case SMU_FCLK: + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_VCLK: + case SMU_DCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); + break; + case SMU_SCLK: + case SMU_GFXCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + if (cur_value == CYAN_SKILLFISH_SCLK_MAX) + i = 2; + 
else if (cur_value == CYAN_SKILLFISH_SCLK_MIN) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : cyan_skillfish_sclk_default, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX, + i == 2 ? "*" : ""); + break; + default: + dev_warn(smu->adev->dev, "Unsupported clock type\n"); + return ret; + } + + return size; +} + +static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + uint32_t feature_mask[2]; + uint64_t feature_enabled; + + /* we need to re-init after suspend so return false */ + if (adev->in_suspend) + return false; + + ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); + if (ret) + return false; + + feature_enabled = (uint64_t)feature_mask[0] | + ((uint64_t)feature_mask[1] << 32); + + /* + * cyan_skillfish specific, query default sclk inseted of hard code. + */ + if (!cyan_skillfish_sclk_default) + cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, + &cyan_skillfish_sclk_default); + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_2 *gpu_metrics = + (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int i, ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); + + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; + + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; + gpu_metrics->average_soc_power = metrics.Current.Power[0]; + gpu_metrics->average_gfx_power = metrics.Current.Power[1]; + + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; + + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; + + for (i = 0; i < 6; i++) { + gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i]; + gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i]; + gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i]; + } + + for (i = 0; i < 2; i++) { + gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i]; + gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i]; + } + + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_2); +} + +static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND 
type, + long input[], uint32_t size) +{ + int ret = 0; + uint32_t vid; + + switch (type) { + case PP_OD_EDIT_VDDC_CURVE: + if (size != 3 || input[0] != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + if (input[1] < CYAN_SKILLFISH_SCLK_MIN || + input[1] > CYAN_SKILLFISH_SCLK_MAX) { + dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + return -EINVAL; + } + + if (input[2] < CYAN_SKILLFISH_VDDC_MIN || + input[2] > CYAN_SKILLFISH_VDDC_MAX) { + dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + return -EINVAL; + } + + cyan_skillfish_user_settings.sclk = input[1]; + cyan_skillfish_user_settings.vddc = input[2]; + + break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + cyan_skillfish_user_settings.sclk = cyan_skillfish_sclk_default; + cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC; + + break; + case PP_OD_COMMIT_DPM_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN || + cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) { + dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + return -EINVAL; + } + + if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) && + (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN || + cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) { + dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + return -EINVAL; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk, + cyan_skillfish_user_settings.sclk, NULL); + if (ret) { + dev_err(smu->adev->dev, "Set sclk failed!\n"); + return ret; + } + + if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) { + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL); + if (ret) { + dev_err(smu->adev->dev, "Unforce vddc failed!\n"); + return ret; + } + } else { + /* + * PMFW accepts SVI2 VID code, convert voltage to VID: + * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001) + */ + vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL); + if (ret) { + dev_err(smu->adev->dev, "Force vddc failed!\n"); + return ret; + } + } + + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + static const struct pptable_funcs cyan_skillfish_ppt_funcs = { .check_fw_status = smu_v11_0_check_fw_status, .check_fw_version = smu_v11_0_check_fw_version, .init_power = smu_v11_0_init_power, .fini_power = smu_v11_0_fini_power, + .init_smc_tables = cyan_skillfish_init_smc_tables, + .fini_smc_tables = cyan_skillfish_finit_smc_tables, + .read_sensor = cyan_skillfish_read_sensor, + .print_clk_levels = cyan_skillfish_print_clk_levels, + .is_dpm_running = cyan_skillfish_is_dpm_running, + .get_gpu_metrics = cyan_skillfish_get_gpu_metrics, + .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table, .register_irq_handler = smu_v11_0_register_irq_handler, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, @@ -72,5 +577,6 @@ void cyan_skillfish_set_ppt_funcs(struct 
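/*
 * Sanity-checking the SVI2 VID conversion above against the table
 * limits: at the 1129 mV ceiling, vid = (1550 - 1129) * 160 / 1000 = 67;
 * at the 700 mV floor, vid = (1550 - 700) * 160 / 1000 = 136. Lower
 * voltages map to higher VID codes, consistent with SVI2's 6.25 mV
 * (1/160 V) step size.
 */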
smu_context *smu) { smu->ppt_funcs = &cyan_skillfish_ppt_funcs; smu->message_map = cyan_skillfish_message_map; + smu->table_map = cyan_skillfish_table_map; smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index a5fc5d7cb6c7..60a557068ea4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -86,21 +86,21 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0), MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1), - MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), - MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), - MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), - MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0), MSG_MAP(RunBtc, PPSMC_MSG_RunBtc, 0), MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), - MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), - MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0), MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), @@ -345,7 +345,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, /* DPM UCLK enablement should be skipped for navi10 A0 secure board */ if (!(is_asic_secure(smu) && - (adev->asic_type == CHIP_NAVI10) && + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (adev->rev_id == 0)) && (adev->pm.pp_feature & PP_MCLK_DPM_MASK)) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT) @@ -354,7 +354,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */ if (is_asic_secure(smu) && - (adev->asic_type == CHIP_NAVI10) && + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (adev->rev_id == 0)) *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT); @@ -925,18 +925,18 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu, return ret; } - switch (adev->asic_type) { - case CHIP_NAVI12: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 9): if (smu_version > 0x00341C00) ret = navi12_get_smu_metrics_data(smu, member, value); else ret = navi12_get_legacy_smu_metrics_data(smu, member, value); break; - case CHIP_NAVI10: - case CHIP_NAVI14: + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): default: - if (((adev->asic_type == CHIP_NAVI14) 
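/*
 * The third MSG_MAP() argument being flipped in the navi10 map above
 * is the valid-in-VF flag: under SR-IOV, a virtual function may only
 * issue messages marked 1. The mapping entry looks roughly like:
 *
 *	struct cmn2asic_msg_mapping {
 *		int valid_mapping;
 *		int map_to;		// ASIC-specific PPSMC_MSG_* index
 *		int valid_in_vf;	// allowed from an SR-IOV VF
 *	};
 *
 * so SetWorkloadMask (1 -> 0) becomes host-only, while the
 * SetDriverDramAddr* messages (0 -> 1) become usable from a VF.
 */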
&& smu_version > 0x00351F00) || - ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00)) + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) ret = navi10_get_smu_metrics_data(smu, member, value); else ret = navi10_get_legacy_smu_metrics_data(smu, member, value); @@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, size = 0, ret = 0; + int i, levels, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu, struct smu_11_0_overdrive_table *od_settings = smu->od_settings; uint32_t min_value, max_value; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -1317,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu, freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : cur_value == freq_values[2] ? 2 : 1; - if (mark_index != 1) - freq_values[1] = (freq_values[0] + freq_values[2]) / 2; - for (i = 0; i < 3; i++) { + levels = 3; + if (mark_index != 1) { + levels = 2; + freq_values[1] = freq_values[2]; + } + + for (i = 0; i < levels; i++) { size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i], i == mark_index ? "*" : ""); } - } break; case SMU_PCIE: @@ -1392,7 +1397,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, case SMU_OD_RANGE: if (!smu->od_enabled || !od_table || !od_settings) break; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) { navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, @@ -1507,8 +1512,8 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) uint32_t sclk_freq; pstate_table->gfxclk_pstate.min = gfx_table->min; - switch (adev->asic_type) { - case CHIP_NAVI10: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): switch (adev->pdev->revision) { case 0xf0: /* XTX */ case 0xc0: @@ -1523,7 +1528,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) break; } break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): switch (adev->pdev->revision) { case 0xc7: /* XT */ case 0xf4: @@ -1546,7 +1551,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) break; } break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK; break; default: @@ -2272,7 +2277,27 @@ static int navi10_baco_enter(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) + /* + * This aims the case below: + * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded + * + * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To + * make that possible, PMFW needs to acknowledge the dstate transition + * process for both gfx(function 0) and audio(function 1) function of + * the ASIC. + * + * The PCI device's initial runpm status is RUNPM_SUSPENDED. So as the + * device representing the audio function of the ASIC. And that means + * even if the sound driver(snd_hda_intel) was not loaded yet, it's still + * possible runpm suspend kicked on the ASIC. 
However without the dstate + * transition notification from audio function, pmfw cannot handle the + * BACO in/exit correctly. And that will cause driver hang on runpm + * resuming. + * + * To address this, we revert to legacy message way(driver masters the + * timing for BACO in/exit) on sound driver missing. + */ + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); else return smu_v11_0_baco_enter(smu); @@ -2282,7 +2307,7 @@ static int navi10_baco_exit(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) { + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */ msleep(10); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); @@ -2540,8 +2565,8 @@ static bool navi10_need_umc_cdr_workaround(struct smu_context *smu) if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) return false; - if (adev->asic_type == CHIP_NAVI10 || - adev->asic_type == CHIP_NAVI14) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) return true; return false; @@ -2649,8 +2674,8 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu) * - PPSMC_MSG_SetDriverDummyTableDramAddrLow * - PPSMC_MSG_GetUMCFWWA */ - if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) || - ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) { + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GET_UMC_FW_WA, 0, @@ -2669,13 +2694,13 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu) return 0; if (umc_fw_disable_cdr) { - if (adev->asic_type == CHIP_NAVI10) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) return navi10_umc_hybrid_cdr_workaround(smu); } else { return navi10_set_dummy_pstates_table_location(smu); } } else { - if (adev->asic_type == CHIP_NAVI10) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) return navi10_umc_hybrid_cdr_workaround(smu); } @@ -3129,18 +3154,18 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu, return ret; } - switch (adev->asic_type) { - case CHIP_NAVI12: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 9): if (smu_version > 0x00341C00) ret = navi12_get_gpu_metrics(smu, table); else ret = navi12_get_legacy_gpu_metrics(smu, table); break; - case CHIP_NAVI10: - case CHIP_NAVI14: + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): default: - if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) || - ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00)) + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00)) ret = navi10_get_gpu_metrics(smu, table); else ret =navi10_get_legacy_gpu_metrics(smu, table); @@ -3158,7 +3183,7 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu) uint32_t param = 0; /* Navi12 does not support this */ - if (adev->asic_type == CHIP_NAVI12) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) return 0; /* diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 5e292c3f5050..a673e05853fe 
100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -74,15 +74,18 @@ #define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 #define GET_PPTABLE_MEMBER(field, member) do {\ - if (smu->adev->asic_type == CHIP_BEIGE_GOBY)\ + if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))\ (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\ else\ (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\ } while(0) +/* STB FIFO depth is in 64bit units */ +#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8 + static int get_table_size(struct smu_context *smu) { - if (smu->adev->asic_type == CHIP_BEIGE_GOBY) + if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) return sizeof(PPTable_beige_goby_t); else return sizeof(PPTable_t); @@ -298,7 +301,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu, } if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) && - (adev->asic_type > CHIP_SIENNA_CICHLID) && + (adev->ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)) && !(adev->flags & AMD_IS_APU)) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT); @@ -496,7 +499,7 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s uint32_t throttler_status = 0; int i; - if ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) && + if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu->smc_fw_version >= 0x3A4300)) { for (i = 0; i < THROTTLER_COUNT; i++) throttler_status |= @@ -517,7 +520,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); SmuMetrics_V2_t *metrics_v2 = &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics_V2); - bool use_metrics_v2 = ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) && + bool use_metrics_v2 = ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu->smc_fw_version >= 0x3A4300)) ? 
true : false; uint16_t average_gfx_activity; int ret = 0; @@ -650,6 +653,8 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu) return 0; } +static void sienna_cichlid_stb_init(struct smu_context *smu); + static int sienna_cichlid_init_smc_tables(struct smu_context *smu) { int ret = 0; @@ -662,6 +667,8 @@ static int sienna_cichlid_init_smc_tables(struct smu_context *smu) if (ret) return ret; + sienna_cichlid_stb_init(smu); + return smu_v11_0_init_smc_tables(smu); } @@ -670,7 +677,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_11_0_dpm_table *dpm_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; + int i, ret = 0; DpmDescriptor_t *table_member; /* socclk dpm table setup */ @@ -746,78 +753,45 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* vclk0 dpm table setup */ - dpm_table = &dpm_context->dpm_tables.vclk_table; - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_v11_0_set_single_dpm_table(smu, - SMU_VCLK, - dpm_table); - if (ret) - return ret; - dpm_table->is_fine_grained = - !table_member[PPCLK_VCLK_0].SnapToDiscrete; - } else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; - dpm_table->dpm_levels[0].enabled = true; - dpm_table->min = dpm_table->dpm_levels[0].value; - dpm_table->max = dpm_table->dpm_levels[0].value; - } + /* vclk0/1 dpm table setup */ + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; - /* vclk1 dpm table setup */ - if (adev->vcn.num_vcn_inst > 1) { - dpm_table = &dpm_context->dpm_tables.vclk1_table; + dpm_table = &dpm_context->dpm_tables.vclk_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_v11_0_set_single_dpm_table(smu, - SMU_VCLK1, + i ? SMU_VCLK1 : SMU_VCLK, dpm_table); if (ret) return ret; dpm_table->is_fine_grained = - !table_member[PPCLK_VCLK_1].SnapToDiscrete; + !table_member[i ? PPCLK_VCLK_1 : PPCLK_VCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; - dpm_table->dpm_levels[0].value = - smu->smu_table.boot_values.vclk / 100; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; dpm_table->dpm_levels[0].enabled = true; dpm_table->min = dpm_table->dpm_levels[0].value; dpm_table->max = dpm_table->dpm_levels[0].value; } } - /* dclk0 dpm table setup */ - dpm_table = &dpm_context->dpm_tables.dclk_table; - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_v11_0_set_single_dpm_table(smu, - SMU_DCLK, - dpm_table); - if (ret) - return ret; - dpm_table->is_fine_grained = - !table_member[PPCLK_DCLK_0].SnapToDiscrete; - } else { - dpm_table->count = 1; - dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; - dpm_table->dpm_levels[0].enabled = true; - dpm_table->min = dpm_table->dpm_levels[0].value; - dpm_table->max = dpm_table->dpm_levels[0].value; - } - - /* dclk1 dpm table setup */ - if (adev->vcn.num_vcn_inst > 1) { - dpm_table = &dpm_context->dpm_tables.dclk1_table; + /* dclk0/1 dpm table setup */ + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + dpm_table = &dpm_context->dpm_tables.dclk_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_v11_0_set_single_dpm_table(smu, - SMU_DCLK1, + i ? 
SMU_DCLK1 : SMU_DCLK, dpm_table); if (ret) return ret; dpm_table->is_fine_grained = - !table_member[PPCLK_DCLK_1].SnapToDiscrete; + !table_member[i ? PPCLK_DCLK_1 : PPCLK_DCLK_0].SnapToDiscrete; } else { dpm_table->count = 1; - dpm_table->dpm_levels[0].value = - smu->smu_table.boot_values.dclk / 100; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; dpm_table->dpm_levels[0].enabled = true; dpm_table->min = dpm_table->dpm_levels[0].value; dpm_table->max = dpm_table->dpm_levels[0].value; @@ -902,32 +876,18 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { struct amdgpu_device *adev = smu->adev; - int ret = 0; + int i, ret = 0; - if (enable) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* vcn dpm on is a prerequisite for vcn power gate messages */ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + 0x10000 * i, NULL); if (ret) return ret; - if (adev->vcn.num_vcn_inst > 1) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, - 0x10000, NULL); - if (ret) - return ret; - } - } - } else { - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); - if (ret) - return ret; - if (adev->vcn.num_vcn_inst > 1) { - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, - 0x10000, NULL); - if (ret) - return ret; - } } } @@ -1058,6 +1018,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; uint32_t smu_version; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -1168,7 +1130,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, * and onwards SMU firmwares. */ smu_cmn_get_smc_version(smu, NULL, &smu_version); - if ((adev->asic_type == CHIP_SIENNA_CICHLID) && + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu_version < 0x003a2900)) break; @@ -1180,7 +1142,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, if (!smu->od_enabled || !od_table || !od_settings) break; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) { sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN, @@ -1216,7 +1178,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) { struct amdgpu_device *adev = smu->adev; - int ret = 0, size = 0; + int ret = 0; uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; soft_min_level = mask ? 
(ffs(mask) - 1) : 0; @@ -1261,7 +1223,7 @@ forec_level_out: if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK)) amdgpu_gfx_off_ctrl(adev, true); - return size; + return 0; } static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) @@ -1935,7 +1897,7 @@ static void sienna_cichlid_dump_od_table(struct smu_context *smu, od_table->UclkFmax); smu_cmn_get_smc_version(smu, NULL, &smu_version); - if (!((adev->asic_type == CHIP_SIENNA_CICHLID) && + if (!((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu_version < 0x003a2900))) dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset); } @@ -2159,7 +2121,7 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, * and onwards SMU firmwares. */ smu_cmn_get_smc_version(smu, NULL, &smu_version); - if ((adev->asic_type == CHIP_SIENNA_CICHLID) && + if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu_version < 0x003a2900)) { dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported " "only by 58.41.0 and onwards SMU firmwares!\n"); @@ -2180,14 +2142,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, static int sienna_cichlid_run_btc(struct smu_context *smu) { - return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); + int res; + + res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); + if (res) + dev_err(smu->adev->dev, "RunDcBtc failed!\n"); + + return res; } static int sienna_cichlid_baco_enter(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); else return smu_v11_0_baco_enter(smu); @@ -2197,7 +2165,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) { + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */ msleep(10); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); @@ -2863,7 +2831,7 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; int i; - if (smu->adev->asic_type == CHIP_BEIGE_GOBY) { + if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) { beige_goby_dump_pptable(smu); return; } @@ -3623,7 +3591,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, SmuMetrics_V2_t *metrics_v2 = &(metrics_external.SmuMetrics_V2); struct amdgpu_device *adev = smu->adev; - bool use_metrics_v2 = ((adev->asic_type == CHIP_SIENNA_CICHLID) && + bool use_metrics_v2 = ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && (smu->smc_fw_version >= 0x3A4300)) ? true : false; uint16_t average_gfx_activity; int ret = 0; @@ -3664,6 +3632,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->energy_accumulator = use_metrics_v2 ? metrics_v2->EnergyAccumulator : metrics->EnergyAccumulator; + if (metrics->CurrGfxVoltageOffset) + gpu_metrics->voltage_gfx = + (155000 - 625 * metrics->CurrGfxVoltageOffset) / 100; + if (metrics->CurrMemVidOffset) + gpu_metrics->voltage_mem = + (155000 - 625 * metrics->CurrMemVidOffset) / 100; + if (metrics->CurrSocVoltageOffset) + gpu_metrics->voltage_soc = + (155000 - 625 * metrics->CurrSocVoltageOffset) / 100; + average_gfx_activity = use_metrics_v2 ? 
metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity; if (average_gfx_activity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) gpu_metrics->average_gfxclk_frequency = @@ -3704,8 +3682,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->current_fan_speed = use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed; - if (((adev->asic_type == CHIP_SIENNA_CICHLID) && smu->smc_fw_version > 0x003A1E00) || - ((adev->asic_type == CHIP_NAVY_FLOUNDER) && smu->smc_fw_version > 0x00410400)) { + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) || + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) { gpu_metrics->pcie_link_width = use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth; gpu_metrics->pcie_link_speed = link_speed[use_metrics_v2 ? metrics_v2->PcieRate : metrics->PcieRate]; } else { @@ -3838,6 +3816,53 @@ static int sienna_cichlid_set_mp1_state(struct smu_context *smu, return ret; } +static void sienna_cichlid_stb_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t reg; + + reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_START); + smu->stb_context.enabled = REG_GET_FIELD(reg, MP1_PMI_3_START, ENABLE); + + /* STB is disabled */ + if (!smu->stb_context.enabled) + return; + + spin_lock_init(&smu->stb_context.lock); + + /* STB buffer size in bytes as function of FIFO depth */ + reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_FIFO); + smu->stb_context.stb_buf_size = 1 << REG_GET_FIELD(reg, MP1_PMI_3_FIFO, DEPTH); + smu->stb_context.stb_buf_size *= SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES; + + dev_info(smu->adev->dev, "STB initialized to %d entries", + smu->stb_context.stb_buf_size / SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES); + +} + +int sienna_cichlid_stb_get_data_direct(struct smu_context *smu, + void *buf, + uint32_t size) +{ + uint32_t *p = buf; + struct amdgpu_device *adev = smu->adev; + + /* No need to disable interrupts for now as we don't lock it yet from ISR */ + spin_lock(&smu->stb_context.lock); + + /* + * Read the STB FIFO in units of 32bit since this is the accessor window + * (register width) we have. 
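+ * As a worked example (illustrative numbers, not taken from the patch):
+ * with MP1_PMI_3_FIFO.DEPTH == 8, sienna_cichlid_stb_init() above computes
+ * stb_buf_size = (1 << 8) * 8 = 2048 bytes, which the loop below drains
+ * with 2048 / 4 = 512 dword reads.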
+ */ + buf = ((char *) buf) + size; + while ((void *)p < buf) + *p++ = cpu_to_le32(RREG32_PCIE(MP1_Public | smnMP1_PMI_3)); + + spin_unlock(&smu->stb_context.lock); + + return 0; +} + static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask, .set_default_dpm_table = sienna_cichlid_set_default_dpm_table, @@ -3927,6 +3952,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .interrupt_work = smu_v11_0_interrupt_work, .gpo_control = sienna_cichlid_gpo_control, .set_mp1_state = sienna_cichlid_set_mp1_state, + .stb_collect_info = sienna_cichlid_stb_get_data_direct, }; void sienna_cichlid_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 87b055466a33..28b7c0562b99 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -90,37 +90,38 @@ int smu_v11_0_init_microcode(struct smu_context *smu) struct amdgpu_firmware_info *ucode = NULL; if (amdgpu_sriov_vf(adev) && - ((adev->asic_type == CHIP_NAVI12) || - (adev->asic_type == CHIP_SIENNA_CICHLID))) + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)))) return 0; - switch (adev->asic_type) { - case CHIP_ARCTURUS: - chip_name = "arcturus"; - break; - case CHIP_NAVI10: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): chip_name = "navi10"; break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): chip_name = "navi14"; break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): chip_name = "navi12"; break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(11, 0, 7): chip_name = "sienna_cichlid"; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(11, 0, 11): chip_name = "navy_flounder"; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(11, 0, 12): chip_name = "dimgrey_cavefish"; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 13): chip_name = "beige_goby"; break; + case IP_VERSION(11, 0, 2): + chip_name = "arcturus"; + break; default: - dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type); + dev_err(adev->dev, "Unsupported IP version 0x%x\n", + adev->ip_versions[MP1_HWIP][0]); return -EINVAL; } @@ -238,39 +239,40 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) if (smu->is_apu) adev->pm.fw_version = smu_version; - switch (smu->adev->asic_type) { - case CHIP_ARCTURUS: - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; - break; - case CHIP_NAVI10: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; break; - case CHIP_NAVI12: + case IP_VERSION(11, 0, 9): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12; break; - case CHIP_NAVI14: + case IP_VERSION(11, 0, 5): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14; break; - case CHIP_SIENNA_CICHLID: + case IP_VERSION(11, 0, 7): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid; break; - case CHIP_NAVY_FLOUNDER: + case IP_VERSION(11, 0, 11): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; break; - case CHIP_VANGOGH: + case IP_VERSION(11, 5, 0): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH; break; - case CHIP_DIMGREY_CAVEFISH: + case IP_VERSION(11, 0, 12): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish; break; - case CHIP_BEIGE_GOBY: + case IP_VERSION(11, 0, 13): 
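/*
 * Editorial aside, not part of the patch: IP_VERSION(mj, mn, rv) packs the
 * discovered IP version into one integer -- in this kernel it expands to
 * (((mj) << 16) | ((mn) << 8) | (rv)) -- so IP_VERSION(11, 0, 13) is
 * 0x000b000d, and ordered comparisons such as the
 * 'ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)' check earlier in this
 * series work numerically.
 */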
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby; break; - case CHIP_CYAN_SKILLFISH: + case IP_VERSION(11, 0, 8): smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish; break; + case IP_VERSION(11, 0, 2): + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; + break; default: - dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); + dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", + adev->ip_versions[MP1_HWIP][0]); smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; break; } @@ -492,8 +494,9 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu) int smu_v11_0_init_power(struct smu_context *smu) { + struct amdgpu_device *adev = smu->adev; struct smu_power_context *smu_power = &smu->smu_power; - size_t size = smu->adev->asic_type == CHIP_VANGOGH ? + size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ? sizeof(struct smu_11_5_power_context) : sizeof(struct smu_11_0_power_context); @@ -750,8 +753,10 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) /* Navy_Flounder/Dimgrey_Cavefish do not support to change * display num currently */ - if (adev->asic_type >= CHIP_NAVY_FLOUNDER && - adev->asic_type <= CHIP_BEIGE_GOBY) + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) || + adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) return 0; return smu_cmn_send_smc_msg_with_param(smu, @@ -974,10 +979,16 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu, return ret; } -int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) +int smu_v11_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) { int power_src; int ret = 0; + uint32_t limit_param; + + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); @@ -997,16 +1008,16 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) * BIT 16-23: PowerSource * BIT 0-15: PowerLimit */ - n &= 0xFFFF; - n |= 0 << 24; - n |= (power_src) << 16; - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); + limit_param = (limit & 0xFFFF); + limit_param |= 0 << 24; + limit_param |= (power_src) << 16; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL); if (ret) { dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); return ret; } - smu->current_power_limit = n; + smu->current_power_limit = limit; return 0; } @@ -1136,15 +1147,15 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0; struct amdgpu_device *adev = smu->adev; - switch (adev->asic_type) { - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 0): + case IP_VERSION(11, 0, 5): + case IP_VERSION(11, 0, 9): + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): + case IP_VERSION(11, 5, 0): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -1630,11 +1641,11 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) 
mutex_lock(&smu_baco->mutex); if (state == SMU_BACO_STATE_ENTER) { - switch (adev->asic_type) { - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 0, 12): + case IP_VERSION(11, 0, 13): if (amdgpu_runtime_pm == 2) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, @@ -1649,7 +1660,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) default: if (!ras || !adev->ras_enabled || adev->gmc.xgmi.pending_reset) { - if (adev->asic_type == CHIP_ARCTURUS) { + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); data |= 0x80000000; WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data); @@ -1931,7 +1942,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu, * Separate MCLK and SOCCLK soft min/max settings are not allowed * on Arcturus. */ - if (adev->asic_type == CHIP_ARCTURUS) { + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { mclk_min = mclk_max = 0; socclk_min = socclk_max = 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 3a3421452e57..c02ed65ffa38 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; case SMU_OD_CCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->cpu_actual_soft_min_freq > 0) ? 
smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", @@ -681,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; + uint32_t min, max; memset(&metrics, 0, sizeof(metrics)); @@ -688,10 +691,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -700,7 +705,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; case SMU_OD_CCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -709,7 +714,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", @@ -739,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); + if (ret) { + return ret; + } + break; default: break; } @@ -764,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (!cur_value_match_level) size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); break; + case SMU_GFXCLK: + case SMU_SCLK: + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? 
"*" : ""); + break; default: break; } @@ -1382,52 +1412,38 @@ static int vangogh_set_performance_level(struct smu_context *smu, uint32_t soc_mask, mclk_mask, fclk_mask; uint32_t vclk_mask = 0, dclk_mask = 0; + smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; + smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; + switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; + smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; ret = vangogh_force_dpm_limit_value(smu, true); + if (ret) + return ret; break; case AMD_DPM_FORCED_LEVEL_LOW: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; + smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; ret = vangogh_force_dpm_limit_value(smu, false); + if (ret) + return ret; break; case AMD_DPM_FORCED_LEVEL_AUTO: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - ret = vangogh_unforce_dpm_levels(smu); - break; - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetHardMinGfxClk, - VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL); - if (ret) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetSoftMaxGfxClk, - VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL); if (ret) return ret; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; + smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK; ret = vangogh_get_profiling_clk_mask(smu, level, &vclk_mask, @@ -1442,32 +1458,15 @@ static int vangogh_set_performance_level(struct smu_context *smu, vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask); - break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, - VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL); - if (ret) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, - VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL); - if (ret) - return ret; + smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq; break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; - 
smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - ret = vangogh_get_profiling_clk_mask(smu, level, NULL, NULL, @@ -1480,29 +1479,29 @@ static int vangogh_set_performance_level(struct smu_context *smu, vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); break; case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; - smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; + smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; + smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK; - smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq; - smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, - VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL); - if (ret) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, - VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL); + ret = vangogh_set_peak_clock_by_device(smu); if (ret) return ret; - - ret = vangogh_set_peak_clock_by_device(smu); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: default: - break; + return 0; } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, + smu->gfx_actual_hard_min_freq, NULL); + if (ret) + return ret; + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, + smu->gfx_actual_soft_max_freq, NULL); + if (ret) + return ret; + return ret; } @@ -2140,11 +2139,12 @@ static int vangogh_get_ppt_limit(struct smu_context *smu, return 0; } -static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit) +static int vangogh_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t ppt_limit) { struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; - uint32_t limit_type = ppt_limit >> 24; + smu->smu_power.power_context; int ret = 0; if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5aa175e12a78..145f13b8c977 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index ab652028e003..6e781cee8bb6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -78,6 +78,12 @@ #define smnPCIE_ESM_CTRL 0x111003D0 +/* + * The SMU supports ECCTABLE since version 68.42.0; + * use this to check whether the ECCTABLE feature is supported. + */ +#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00 + static const struct smu_temperature_range smu13_thermal_policy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, @@ -190,6 +196,7 @@ static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = { TAB_MAP(SMU_METRICS), TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), }; static const uint8_t aldebaran_throttler_map[] = { @@ -223,6 +230,9 @@ static int aldebaran_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; @@ -235,6 +245,10 @@ static int aldebaran_tables_init(struct smu_context *smu) return -ENOMEM; } + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + return -ENOMEM; + return 0; } @@ -733,15 +747,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, uint32_t freq_values[3] = {0}; uint32_t min_clk, max_clk; - if (amdgpu_ras_intr_triggered()) - return sysfs_emit(buf, "unavailable\n"); + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } dpm_context = smu_dpm->dpm_context; switch (type) { case SMU_OD_SCLK: - size = sysfs_emit(buf, "%s:\n", "GFXCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); fallthrough; case SMU_SCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); @@ -795,7 +813,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - size = sysfs_emit(buf, "%s:\n", "MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); fallthrough; case SMU_MCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); @@ -1237,11 +1255,13 @@ static int aldebaran_get_power_limit(struct smu_context *smu, return 0; } -static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n) +static int aldebaran_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) { /* Power limit can be set only through primary die */ if (aldebaran_is_primary(smu)) - return smu_v13_0_set_power_limit(smu, n); + return smu_v13_0_set_power_limit(smu, limit_type, limit); return -EINVAL; } @@ -1759,6 +1779,98 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } +static int aldebaran_check_ecc_table_support(struct smu_context *smu) +{ + uint32_t if_version = 0xff, smu_version = 0xff; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); + if (ret) { + /* treat the feature as unsupported if we failed to get smu_version */ + ret = -EOPNOTSUPP; + } + + if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION) + ret = -EOPNOTSUPP; + + return ret; +} + +static ssize_t aldebaran_get_ecc_info(struct smu_context *smu, + void *table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + EccInfoTable_t *ecc_table = NULL; + struct ecc_info_per_ch *ecc_info_per_channel = NULL; + int i, ret = 0; + struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table; + + ret = aldebaran_check_ecc_table_support(smu); + if (ret) + return ret; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ECCINFO, + 0, + smu_table->ecc_table, + false); + if (ret) { + dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n"); + return ret; + } + + ecc_table = (EccInfoTable_t *)smu_table->ecc_table; + + for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) { + ecc_info_per_channel = &(eccinfo->ecc[i]); + ecc_info_per_channel->ce_count_lo_chip = + ecc_table->EccInfo[i].ce_count_lo_chip; + ecc_info_per_channel->ce_count_hi_chip = + ecc_table->EccInfo[i].ce_count_hi_chip; + ecc_info_per_channel->mca_umc_status = + ecc_table->EccInfo[i].mca_umc_status; + ecc_info_per_channel->mca_umc_addr =
ecc_table->EccInfo[i].mca_umc_addr; + } + + return ret; +} + +static int aldebaran_mode1_reset(struct smu_context *smu) +{ + u32 smu_version, fatal_err, param; + int ret = 0; + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + fatal_err = 0; + param = SMU_RESET_MODE_1; + + /* + * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07 + */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if (smu_version < 0x00440700) { + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); + } + else { + /* fatal error triggered by ras, PMFW supports the flag + from 68.44.0 */ + if ((smu_version >= 0x00442c00) && ras && + atomic_read(&ras->in_recovery)) + fatal_err = 1; + + param |= (fatal_err << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GfxDeviceDriverReset, param, NULL); + } + + if (!ret) + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); + + return ret; +} + static int aldebaran_mode2_reset(struct smu_context *smu) { u32 smu_version; @@ -1919,13 +2031,14 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .get_gpu_metrics = aldebaran_get_gpu_metrics, .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, .mode2_reset_is_support = aldebaran_is_mode2_reset_supported, - .mode1_reset = smu_v13_0_mode1_reset, + .mode1_reset = aldebaran_mode1_reset, .set_mp1_state = aldebaran_set_mp1_state, .mode2_reset = aldebaran_mode2_reset, .wait_for_event = smu_v13_0_wait_for_event, .i2c_init = aldebaran_i2c_control_init, .i2c_fini = aldebaran_i2c_control_fini, .send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num, + .get_ecc_info = aldebaran_get_ecc_info, }; void aldebaran_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a0e50f23b1dd..55421ea622fb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -60,8 +60,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); #define SMU13_VOLTAGE_SCALE 4 -#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms - #define LINK_WIDTH_MAX 6 #define LINK_SPEED_MAX 3 @@ -89,12 +87,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu) if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_ALDEBARAN: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(13, 0, 2): chip_name = "aldebaran"; break; default: - dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type); + dev_err(adev->dev, "Unsupported IP version 0x%x\n", + adev->ip_versions[MP1_HWIP][0]); return -EINVAL; } @@ -210,15 +209,17 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; - switch (smu->adev->asic_type) { - case CHIP_ALDEBARAN: + switch (smu->adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(13, 0, 2): smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; break; - case CHIP_YELLOW_CARP: + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; break; default: - dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); + dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", + smu->adev->ip_versions[MP1_HWIP][0]); smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; break; } @@ -427,8 +428,10 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->hardcode_pptable); smu_table->hardcode_pptable = NULL; + 
kfree(smu_table->ecc_table); kfree(smu_table->metrics_table); kfree(smu_table->watermarks_table); + smu_table->ecc_table = NULL; smu_table->metrics_table = NULL; smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; @@ -740,8 +743,9 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) int ret = 0; struct amdgpu_device *adev = smu->adev; - switch (adev->asic_type) { - case CHIP_YELLOW_CARP: + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(13, 0, 1): + case IP_VERSION(13, 0, 3): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -941,22 +945,27 @@ int smu_v13_0_get_current_power_limit(struct smu_context *smu, return ret; } -int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n) +int smu_v13_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit) { int ret = 0; + if (limit_type != SMU_DEFAULT_PPT_LIMIT) + return -EINVAL; + if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); return -EOPNOTSUPP; } - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL); if (ret) { dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); return ret; } - smu->current_power_limit = n; + smu->current_power_limit = limit; return 0; } @@ -1415,25 +1424,6 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu) return ret; } -int smu_v13_0_mode1_reset(struct smu_context *smu) -{ - u32 smu_version; - int ret = 0; - /* - * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07 - */ - smu_cmn_get_smc_version(smu, NULL, &smu_version); - if (smu_version < 0x00440700) - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); - else - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_1, NULL); - - if (!ret) - msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); - - return ret; -} - static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu, uint64_t event_arg) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 627ba2eec7fd..caf1775d48ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -64,7 +64,6 @@ static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), - MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), @@ -135,14 +134,6 @@ static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = { TAB_MAP_VALID(CUSTOM_DPM), TAB_MAP_VALID(DPMCLOCKS), }; - -static struct cmn2asic_mapping yellow_carp_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), - WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), -}; static int yellow_carp_init_smc_tables(struct 
smu_context *smu) { @@ -543,81 +534,6 @@ static int yellow_carp_set_watermarks_table(struct smu_context *smu, return 0; } -static int yellow_carp_get_power_profile_mode(struct smu_context *smu, - char *buf) -{ - static const char *profile_name[] = { - "BOOTUP_DEFAULT", - "3D_FULL_SCREEN", - "POWER_SAVING", - "VIDEO", - "VR", - "COMPUTE", - "CUSTOM"}; - uint32_t i, size = 0; - int16_t workload_type = 0; - - if (!buf) - return -EINVAL; - - for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { - /* - * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT. - * Not all profile modes are supported on yellow carp. - */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - i); - - if (workload_type < 0) - continue; - - size += sysfs_emit_at(buf, size, "%2d %14s%s\n", - i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); - } - - return size; -} - -static int yellow_carp_set_power_profile_mode(struct smu_context *smu, - long *input, uint32_t size) -{ - int workload_type, ret; - uint32_t profile_mode = input[size]; - - if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); - return -EINVAL; - } - - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || - profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, - profile_mode); - if (workload_type < 0) { - dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on YELLOWCARP\n", - profile_mode); - return -EINVAL; - } - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, - NULL); - if (ret) { - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", - workload_type); - return ret; - } - - smu->power_profile_mode = profile_mode; - - return 0; -} - static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, void **table) { @@ -781,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu, case SMU_FCLK: return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, value); + case SMU_GFXCLK: + case SMU_SCLK: + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetGfxclkFrequency, 0, value); + break; default: return -EINVAL; } @@ -1051,17 +972,20 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t min, max; + + smu_cmn_get_sysfs_buf(&buf, &size); switch (clk_type) { case SMU_OD_SCLK: - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); break; @@ -1087,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, cur_value == value ? 
"*" : ""); } break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + goto print_clk_out; + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + break; default: break; } @@ -1236,8 +1181,6 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = { .read_sensor = yellow_carp_read_sensor, .is_dpm_running = yellow_carp_is_dpm_running, .set_watermarks_table = yellow_carp_set_watermarks_table, - .get_power_profile_mode = yellow_carp_get_power_profile_mode, - .set_power_profile_mode = yellow_carp_set_power_profile_mode, .get_gpu_metrics = yellow_carp_get_gpu_metrics, .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, @@ -1259,6 +1202,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu) smu->message_map = yellow_carp_message_map; smu->feature_map = yellow_carp_feature_mask_map; smu->table_map = yellow_carp_table_map; - smu->workload_map = yellow_carp_workload_map; smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h index b3ad8352c68a..a9205a8ea3ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h @@ -24,5 +24,6 @@ #define __YELLOW_CARP_PPT_H__ extern void yellow_carp_set_ppt_funcs(struct smu_context *smu); +#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 66711ab24c15..500af6f8adcb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -97,7 +97,7 @@ static void smu_cmn_read_arg(struct smu_context *smu, * smu: a pointer to SMU context * * Returns the status of the SMU, which could be, - * 0, the SMU is busy with your previous command; + * 0, the SMU is busy with your command; * 1, execution status: success, execution result: success; * 0xFF, execution status: success, execution result: failure; * 0xFE, unknown command; @@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, const char *message = smu_get_message_name(smu, msg); switch (reg_c2pmsg_90) { - case SMU_RESP_NONE: + case SMU_RESP_NONE: { + u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66); + u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); dev_err_ratelimited(adev->dev, - "SMU: I'm not done with your previous command!"); + "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", + msg_idx, prm); + } break; case SMU_RESP_OK: /* The SMU executed the command. 
It completed with a @@ -348,7 +352,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, __smu_cmn_send_msg(smu, (uint16_t) index, param); reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); - if (res == -EREMOTEIO) + if (res != 0) __smu_cmn_reg_print_error(smu, reg, index, param, msg); if (read_arg) smu_cmn_read_arg(smu, read_arg); @@ -1053,3 +1057,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } + +bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) +{ + struct pci_dev *p = NULL; + bool snd_driver_loaded; + + /* + * If the ASIC comes with no audio function, we always assume + * it is "enabled". + */ + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (!p) + return true; + + snd_driver_loaded = pci_is_enabled(p) ? true : false; + + pci_dev_put(p); + + return snd_driver_loaded; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 16993daa2ae0..beea03810bca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. + */ +static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + +bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); + #endif #endif diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index 3a9e966e0e78..58a242871b28 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -6,7 +6,6 @@ config DRM_HDLCD depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER help Choose this option if you have an ARM High Definition Colour LCD controller. 
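An editorial aside on the smu_cmn_get_sysfs_buf() helper added to smu_cmn.h above: sysfs_emit_at() warns and emits nothing unless the buffer handed to it is page-aligned, yet the pm code passes the print_clk_levels callbacks a pointer that may already sit partway into the page. The helper rewinds buf to the page boundary and records the lost offset in size, after which every print site appends with sysfs_emit_at(). A minimal caller sketch, with a hypothetical function name, mirroring only what the converted call sites in this merge do:

static int foo_print_od_range(struct smu_context *smu, char *buf)
{
	int size = 0;

	/* rewind buf to its page boundary; size picks up the lost offset */
	smu_cmn_get_sysfs_buf(&buf, &size);

	size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
	size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
			      smu->gfx_default_hard_min_freq,
			      smu->gfx_default_soft_max_freq);

	/* the converted call sites return the running offset like this */
	return size;
}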
@@ -27,7 +26,6 @@ config DRM_MALI_DISPLAY depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST) depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig index cec0639e3aa1..e91598b60781 100644 --- a/drivers/gpu/drm/arm/display/Kconfig +++ b/drivers/gpu/drm/arm/display/Kconfig @@ -4,7 +4,6 @@ config DRM_KOMEDA depends on DRM && OF depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS help diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 21909642ee4c..147abf1a3968 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -15,6 +15,8 @@ #include "armada_gem.h" #include "armada_ioctlP.h" +MODULE_IMPORT_NS(DMA_BUF); + static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf) { struct drm_gem_object *gobj = vmf->vma->vm_private_data; @@ -336,7 +338,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_armada_gem_pwrite *args = data; struct armada_gem_object *dobj; char __user *ptr; - int ret; + int ret = 0; DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n", args->handle, args->offset, args->size, args->ptr); @@ -349,9 +351,8 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (!access_ok(ptr, args->size)) return -EFAULT; - ret = fault_in_pages_readable(ptr, args->size); - if (ret) - return ret; + if (fault_in_readable(ptr, args->size)) + return -EFAULT; dobj = armada_gem_object_lookup(file, args->handle); if (dobj == NULL) diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig index 5e95bcea43e9..024ccab14f88 100644 --- a/drivers/gpu/drm/aspeed/Kconfig +++ b/drivers/gpu/drm/aspeed/Kconfig @@ -5,7 +5,7 @@ config DRM_ASPEED_GFX depends on (COMPILE_TEST || ARCH_ASPEED) depends on MMU select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DMA_CMA if HAVE_DMA_CONTIGUOUS select CMA if HAVE_DMA_CONTIGUOUS select MFD_SYSCON diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index b53fee6f1c17..65f172807a0d 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) if (rc) return rc; - return sprintf(buf, "%u\n", reg & 1); + return sprintf(buf, "%u\n", reg); } static DEVICE_ATTR_RO(vga_pw); diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 438a2d05b115..21f71160bc3e 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -3,6 +3,6 @@ # Makefile for the drm device driver. This driver provides support for the # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
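One more editorial aside, on the MODULE_IMPORT_NS(DMA_BUF) line added to armada_gem.c above: the dma-buf exports have been moved into the DMA_BUF symbol namespace, so any module that calls dma_buf_*() must import that namespace or modpost fails the build with an error along the lines of "module uses symbol dma_buf_get from namespace DMA_BUF, but does not import it". A hypothetical minimal consumer:

#include <linux/module.h>
#include <linux/dma-buf.h>

/* declare the namespace import once, anywhere in the module sources */
MODULE_IMPORT_NS(DMA_BUF);

static int __init demo_init(void)
{
	/* dma_buf_get()/dma_buf_put() now resolve only with the import above */
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");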
-ast-y := ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o +ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o obj-$(CONFIG_DRM_AST) := ast.o diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 86d5cd7b6318..6d8613f6fe1c 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -26,7 +26,6 @@ * Authors: Dave Airlie <airlied@redhat.com> */ -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> @@ -233,7 +232,7 @@ static struct pci_driver ast_pci_driver = { static int __init ast_init(void) { - if (vgacon_text_force() && ast_modeset == -1) + if (drm_firmware_drivers_only() && ast_modeset == -1) return -EINVAL; if (ast_modeset == 0) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 2cfce7dc95af..00bfa41ff7cb 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -357,4 +357,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); u8 ast_get_dp501_max_clk(struct drm_device *dev); void ast_init_3rdtx(struct drm_device *dev); +/* ast_i2c.c */ +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); + #endif diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c new file mode 100644 index 000000000000..93e91c36d649 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_i2c.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: MIT +/* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ + +#include <drm/drm_managed.h> +#include <drm/drm_print.h> + +#include "ast_drv.h" + +static void ast_i2c_setsda(void *i2c_priv, int data) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((data & 0x01) ? 0 : 1) << 2; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); + if (ujcrb7 == jtemp) + break; + } +} + +static void ast_i2c_setscl(void *i2c_priv, int clock) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((clock & 0x01) ? 
0 : 1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); + if (ujcrb7 == jtemp) + break; + } +} + +static int ast_i2c_getsda(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static int ast_i2c_getscl(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static void ast_i2c_release(struct drm_device *dev, void *res) +{ + struct ast_i2c_chan *i2c = res; + + i2c_del_adapter(&i2c->adapter); + kfree(i2c); +} + +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) +{ + struct ast_i2c_chan *i2c; + int ret; + + i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); + if (!i2c) + return NULL; + + i2c->adapter.owner = THIS_MODULE; + i2c->adapter.class = I2C_CLASS_DDC; + i2c->adapter.dev.parent = dev->dev; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "AST i2c bit bus"); + i2c->adapter.algo_data = &i2c->bit; + + i2c->bit.udelay = 20; + i2c->bit.timeout = 2; + i2c->bit.data = i2c; + i2c->bit.setsda = ast_i2c_setsda; + i2c->bit.setscl = ast_i2c_setscl; + i2c->bit.getsda = ast_i2c_getsda; + i2c->bit.getscl = ast_i2c_getscl; + ret = i2c_bit_add_bus(&i2c->adapter); + if (ret) { + drm_err(dev, "Failed to register bit i2c\n"); + goto out_kfree; + } + + ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c); + if (ret) + return NULL; + return i2c; + +out_kfree: + kfree(i2c); + return NULL; +} diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 6bfaefa01818..44c2aafcb7c2 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -47,9 +47,6 @@ #include "ast_drv.h" #include "ast_tables.h" -static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); -static void ast_i2c_destroy(struct ast_i2c_chan *i2c); - static inline void ast_load_palette_index(struct ast_private *ast, u8 index, u8 red, u8 green, u8 blue) @@ -1210,9 +1207,9 @@ static int ast_get_modes(struct drm_connector *connector) { struct ast_connector *ast_connector = to_ast_connector(connector); struct ast_private *ast = to_ast_private(connector->dev); - struct edid *edid; - int ret; + struct edid *edid = NULL; bool flags = false; + int ret; if (ast->tx_chip_type == AST_TX_DP501) { ast->dp501_maxclk = 0xff; @@ -1226,7 +1223,7 @@ static int ast_get_modes(struct drm_connector *connector) else kfree(edid); } - if (!flags) + if (!flags && ast_connector->i2c) edid = drm_get_edid(connector, &ast_connector->i2c->adapter); 
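The new ast_i2c_create() above ties the adapter's teardown to the DRM device through drmm_add_action_or_reset(), which either queues the release action or, on failure, runs it immediately — so the caller never unwinds the adapter by hand once registration is attempted. A minimal sketch of that managed-release shape; the my_ddc_* names and the no-op line handlers are purely illustrative:

#include <drm/drm_managed.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/module.h>
#include <linux/slab.h>

struct my_ddc_chan {
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data bit;
	struct drm_device *dev;
};

/* No-op line handlers; a real driver would bang register bits here. */
static void my_setsda(void *data, int state) { }
static void my_setscl(void *data, int state) { }
static int my_getsda(void *data) { return 1; }
static int my_getscl(void *data) { return 1; }

static void my_ddc_release(struct drm_device *dev, void *res)
{
	struct my_ddc_chan *chan = res;

	i2c_del_adapter(&chan->adapter);
	kfree(chan);
}

static struct my_ddc_chan *my_ddc_create(struct drm_device *dev)
{
	struct my_ddc_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->dev = dev;
	chan->adapter.owner = THIS_MODULE;
	chan->adapter.algo_data = &chan->bit;
	snprintf(chan->adapter.name, sizeof(chan->adapter.name), "example ddc");

	chan->bit.udelay = 20;
	chan->bit.timeout = 2;
	chan->bit.data = chan;
	chan->bit.setsda = my_setsda;
	chan->bit.setscl = my_setscl;
	chan->bit.getsda = my_getsda;
	chan->bit.getscl = my_getscl;

	if (i2c_bit_add_bus(&chan->adapter)) {
		kfree(chan);
		return NULL;
	}

	/* On failure this calls my_ddc_release() at once, so no manual unwind. */
	if (drmm_add_action_or_reset(dev, my_ddc_release, chan))
		return NULL;

	return chan;
}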
if (edid) { drm_connector_update_edid_property(&ast_connector->base, edid); @@ -1300,26 +1297,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector, return flags; } -static enum drm_connector_status ast_connector_detect(struct drm_connector - *connector, bool force) -{ - int r; - - r = ast_get_modes(connector); - if (r <= 0) - return connector_status_disconnected; - - return connector_status_connected; -} - -static void ast_connector_destroy(struct drm_connector *connector) -{ - struct ast_connector *ast_connector = to_ast_connector(connector); - - ast_i2c_destroy(ast_connector->i2c); - drm_connector_cleanup(connector); -} - static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { .get_modes = ast_get_modes, .mode_valid = ast_mode_valid, @@ -1327,9 +1304,8 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { static const struct drm_connector_funcs ast_connector_funcs = { .reset = drm_atomic_helper_connector_reset, - .detect = ast_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = ast_connector_destroy, + .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -1345,18 +1321,20 @@ static int ast_connector_init(struct drm_device *dev) if (!ast_connector->i2c) drm_err(dev, "failed to add ddc bus for connector\n"); - drm_connector_init_with_ddc(dev, connector, - &ast_connector_funcs, - DRM_MODE_CONNECTOR_VGA, - &ast_connector->i2c->adapter); + if (ast_connector->i2c) + drm_connector_init_with_ddc(dev, connector, &ast_connector_funcs, + DRM_MODE_CONNECTOR_VGA, + &ast_connector->i2c->adapter); + else + drm_connector_init(dev, connector, &ast_connector_funcs, + DRM_MODE_CONNECTOR_VGA); drm_connector_helper_add(connector, &ast_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT; drm_connector_attach_encoder(connector, encoder); @@ -1425,128 +1403,5 @@ int ast_mode_config_init(struct ast_private *ast) drm_mode_config_reset(dev); - drm_kms_helper_poll_init(dev); - return 0; } - -static int get_clock(void *i2c_priv) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - uint32_t val, val2, count, pass; - - count = 0; - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; - do { - val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; - if (val == val2) { - pass++; - } else { - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01; - } - } while ((pass < 5) && (count++ < 0x10000)); - - return val & 1 ? 1 : 0; -} - -static int get_data(void *i2c_priv) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - uint32_t val, val2, count, pass; - - count = 0; - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; - do { - val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; - if (val == val2) { - pass++; - } else { - pass = 0; - val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01; - } - } while ((pass < 5) && (count++ < 0x10000)); - - return val & 1 ? 
1 : 0; -} - -static void set_clock(void *i2c_priv, int clock) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - int i; - u8 ujcrb7, jtemp; - - for (i = 0; i < 0x10000; i++) { - ujcrb7 = ((clock & 0x01) ? 0 : 1); - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7); - jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01); - if (ujcrb7 == jtemp) - break; - } -} - -static void set_data(void *i2c_priv, int data) -{ - struct ast_i2c_chan *i2c = i2c_priv; - struct ast_private *ast = to_ast_private(i2c->dev); - int i; - u8 ujcrb7, jtemp; - - for (i = 0; i < 0x10000; i++) { - ujcrb7 = ((data & 0x01) ? 0 : 1) << 2; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7); - jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04); - if (ujcrb7 == jtemp) - break; - } -} - -static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) -{ - struct ast_i2c_chan *i2c; - int ret; - - i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); - if (!i2c) - return NULL; - - i2c->adapter.owner = THIS_MODULE; - i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = dev->dev; - i2c->dev = dev; - i2c_set_adapdata(&i2c->adapter, i2c); - snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), - "AST i2c bit bus"); - i2c->adapter.algo_data = &i2c->bit; - - i2c->bit.udelay = 20; - i2c->bit.timeout = 2; - i2c->bit.data = i2c; - i2c->bit.setsda = set_data; - i2c->bit.setscl = set_clock; - i2c->bit.getsda = get_data; - i2c->bit.getscl = get_clock; - ret = i2c_bit_add_bus(&i2c->adapter); - if (ret) { - drm_err(dev, "Failed to register bit i2c\n"); - goto out_free; - } - - return i2c; -out_free: - kfree(i2c); - return NULL; -} - -static void ast_i2c_destroy(struct ast_i2c_chan *i2c) -{ - if (!i2c) - return; - i2c_del_adapter(&i2c->adapter); - kfree(i2c); -} diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig index 5f67f001553b..8ae679f1a518 100644 --- a/drivers/gpu/drm/atmel-hlcdc/Kconfig +++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig @@ -4,7 +4,6 @@ config DRM_ATMEL_HLCDC depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL help Choose this option if you have an ATMEL SoC with an HLCDC display diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 431b6e12a81f..61db5a66b493 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -182,6 +182,7 @@ config DRM_PARADE_PS8622 config DRM_PARADE_PS8640 tristate "Parade PS8640 MIPI DSI to eDP Converter" depends on OF + select DRM_DP_AUX_BUS select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 05e3abb5a0c9..592ecfcf00ca 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -401,7 +401,6 @@ void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode); int adv7533_patch_registers(struct adv7511 *adv); int adv7533_patch_cec_registers(struct adv7511 *adv); int adv7533_attach_dsi(struct adv7511 *adv); -void adv7533_detach_dsi(struct adv7511 *adv); int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv); #ifdef CONFIG_DRM_I2C_ADV7511_AUDIO diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 76555ae64e9c..f8e5da148599 100644 --- 
a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -910,9 +910,6 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge, return ret; } - if (adv->type == ADV7533 || adv->type == ADV7535) - ret = adv7533_attach_dsi(adv); - if (adv->i2c_main->irq) regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0), ADV7511_INT0_HPD); @@ -1288,8 +1285,18 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) drm_bridge_add(&adv7511->bridge); adv7511_audio_init(dev, adv7511); + + if (adv7511->type == ADV7533 || adv7511->type == ADV7535) { + ret = adv7533_attach_dsi(adv7511); + if (ret) + goto err_unregister_audio; + } + return 0; +err_unregister_audio: + adv7511_audio_exit(adv7511); + drm_bridge_remove(&adv7511->bridge); err_unregister_cec: i2c_unregister_device(adv7511->i2c_cec); clk_disable_unprepare(adv7511->cec_clk); @@ -1307,8 +1314,6 @@ static int adv7511_remove(struct i2c_client *i2c) { struct adv7511 *adv7511 = i2c_get_clientdata(i2c); - if (adv7511->type == ADV7533 || adv7511->type == ADV7535) - adv7533_detach_dsi(adv7511); i2c_unregister_device(adv7511->i2c_cec); clk_disable_unprepare(adv7511->cec_clk); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index 59d718bde8c4..eb7579dec40a 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -153,11 +153,10 @@ int adv7533_attach_dsi(struct adv7511 *adv) return -EPROBE_DEFER; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(dev, "failed to create dsi device\n"); - ret = PTR_ERR(dsi); - goto err_dsi_device; + return PTR_ERR(dsi); } adv->dsi = dsi; @@ -167,24 +166,13 @@ int adv7533_attach_dsi(struct adv7511 *adv) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; + return ret; } return 0; - -err_dsi_attach: - mipi_dsi_device_unregister(dsi); -err_dsi_device: - return ret; -} - -void adv7533_detach_dsi(struct adv7511 *adv) -{ - mipi_dsi_detach(adv->dsi); - mipi_dsi_device_unregister(adv->dsi); } int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index cab6c8b92efd..6a4f20fccf84 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -998,11 +998,21 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, if (!blocking) return 0; + /* + * db[1]!=0: entering PSR, wait for fully active remote frame buffer. + * db[1]==0: exiting PSR, wait for either + * (a) ACTIVE_RESYNC - the sink "must display the + * incoming active frames from the Source device with no visible + * glitches and/or artifacts", even though timings may still be + * re-synchronizing; or + * (b) INACTIVE - the transition is fully complete. 
+ */ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status, psr_status >= 0 && ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) || - (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500, - DP_TIMEOUT_PSR_LOOP_MS * 1000); + (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC || + psr_status == DP_PSR_SINK_INACTIVE))), + 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000); if (ret) { dev_warn(dp->dev, "Failed to apply PSR %d\n", ret); return ret; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 1a871f6b6822..2346dbcc505f 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -32,6 +32,8 @@ #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> +#include <media/v4l2-fwnode.h> +#include <sound/hdmi-codec.h> #include <video/display_timing.h> #include "anx7625.h" @@ -166,6 +168,20 @@ static int anx7625_write_and_or(struct anx7625_data *ctx, offset, (val & and_mask) | (or_mask)); } +static int anx7625_config_bit_matrix(struct anx7625_data *ctx) +{ + int i, ret; + + ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CONTROL_REGISTER, 0x80); + for (i = 0; i < 13; i++) + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + VIDEO_BIT_MATRIX_12 + i, + 0x18 + i); + + return ret; +} + static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx) { return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS); @@ -191,10 +207,10 @@ static int wait_aux_op_finish(struct anx7625_data *ctx) AP_AUX_CTRL_STATUS); if (val < 0 || (val & 0x0F)) { DRM_DEV_ERROR(dev, "aux status %02x\n", val); - val = -EIO; + return -EIO; } - return val; + return 0; } static int anx7625_video_mute_control(struct anx7625_data *ctx, @@ -221,38 +237,6 @@ static int anx7625_video_mute_control(struct anx7625_data *ctx, return ret; } -static int anx7625_config_audio_input(struct anx7625_data *ctx) -{ - struct device *dev = &ctx->client->dev; - int ret; - - /* Channel num */ - ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_6, I2S_CH_2 << 5); - - /* FS */ - ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_4, - 0xf0, AUDIO_FS_48K); - /* Word length */ - ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_5, - 0xf0, AUDIO_W_LEN_24_24MAX); - /* I2S */ - ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client, - AUDIO_CHANNEL_STATUS_6, I2S_SLAVE_MODE); - ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, - AUDIO_CONTROL_REGISTER, ~TDM_TIMING_MODE); - /* Audio change flag */ - ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, - AP_AV_STATUS, AP_AUDIO_CHG); - - if (ret < 0) - DRM_DEV_ERROR(dev, "fail to config audio.\n"); - - return ret; -} - /* Reduction of fraction a/b */ static void anx7625_reduction_of_a_fraction(unsigned long *a, unsigned long *b) { @@ -431,7 +415,7 @@ static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx) ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_LANE_CTRL_0, 0xfc); ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, - MIPI_LANE_CTRL_0, 3); + MIPI_LANE_CTRL_0, ctx->pdata.mipi_lanes - 1); /* Htotal */ htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min + @@ -615,6 +599,76 @@ static int anx7625_dsi_config(struct anx7625_data *ctx) return ret; } +static int anx7625_api_dpi_config(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + u16 freq = ctx->dt.pixelclock.min / 1000; + int ret; + + /* configure pixel clock */ + ret = 
anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + PIXEL_CLOCK_L, freq & 0xFF); + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, + PIXEL_CLOCK_H, (freq >> 8)); + + /* set DPI mode */ + /* set to DPI PLL module sel */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_DIGITAL_PLL_9, 0x20); + /* power down MIPI */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_LANE_CTRL_10, 0x08); + /* enable DPI mode */ + ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, + MIPI_DIGITAL_PLL_18, 0x1C); + /* set first edge */ + ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, + VIDEO_CONTROL_0, 0x06); + if (ret < 0) + DRM_DEV_ERROR(dev, "IO error : dpi phy set failed.\n"); + + return ret; +} + +static int anx7625_dpi_config(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "config dpi\n"); + + /* DSC disable */ + ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client, + R_DSC_CTRL_0, ~DSC_EN); + if (ret < 0) { + DRM_DEV_ERROR(dev, "IO error : disable dsc failed.\n"); + return ret; + } + + ret = anx7625_config_bit_matrix(ctx); + if (ret < 0) { + DRM_DEV_ERROR(dev, "config bit matrix failed.\n"); + return ret; + } + + ret = anx7625_api_dpi_config(ctx); + if (ret < 0) { + DRM_DEV_ERROR(dev, "mipi phy(dpi) setup failed.\n"); + return ret; + } + + /* set MIPI RX EN */ + ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, AP_MIPI_RX_EN); + /* clear mute flag */ + ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, (u8)~AP_MIPI_MUTE); + if (ret < 0) + DRM_DEV_ERROR(dev, "IO error : enable mipi rx failed.\n"); + + return ret; +} + static void anx7625_dp_start(struct anx7625_data *ctx) { int ret; @@ -625,9 +679,10 @@ static void anx7625_dp_start(struct anx7625_data *ctx) return; } - anx7625_config_audio_input(ctx); - - ret = anx7625_dsi_config(ctx); + if (ctx->pdata.is_dpi) + ret = anx7625_dpi_config(ctx); + else + ret = anx7625_dsi_config(ctx); if (ret < 0) DRM_DEV_ERROR(dev, "MIPI phy setup error.\n"); @@ -795,7 +850,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, int count, blocks_num; u8 pblock_buf[MAX_DPCD_BUFFER_SIZE]; u8 i, j; - u8 g_edid_break = 0; + int g_edid_break = 0; int ret; struct device *dev = &ctx->client->dev; @@ -826,7 +881,7 @@ static int sp_tx_edid_read(struct anx7625_data *ctx, g_edid_break = edid_read(ctx, offset, pblock_buf); - if (g_edid_break) + if (g_edid_break < 0) break; memcpy(&pedid_blocks_buf[offset], @@ -1075,6 +1130,7 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) return; } + ctx->hpd_status = 1; ctx->hpd_high_cnt++; /* Not support HDCP */ @@ -1084,8 +1140,10 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx) ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10); /* Interrupt for DRM */ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01); - if (ret < 0) + if (ret < 0) { + DRM_DEV_ERROR(dev, "fail to setting HDCP/auth\n"); return; + } ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86); if (ret < 0) @@ -1104,6 +1162,10 @@ static void anx7625_hpd_polling(struct anx7625_data *ctx) int ret, val; struct device *dev = &ctx->client->dev; + /* Interrupt mode, no need poll HPD status, just return */ + if (ctx->pdata.intp_irq) + return; + ret = readx_poll_timeout(anx7625_read_hpd_status_p0, ctx, val, ((val & HPD_STATUS) || (val < 0)), @@ -1131,6 +1193,21 @@ static void anx7625_remove_edid(struct anx7625_data *ctx) ctx->slimport_edid_p.edid_block_num = -1; } +static void anx7625_dp_adjust_swing(struct 
anx7625_data *ctx) +{ + int i; + + for (i = 0; i < ctx->pdata.dp_lane0_swing_reg_cnt; i++) + anx7625_reg_write(ctx, ctx->i2c.tx_p1_client, + DP_TX_LANE0_SWING_REG0 + i, + ctx->pdata.lane0_reg_data[i] & 0xFF); + + for (i = 0; i < ctx->pdata.dp_lane1_swing_reg_cnt; i++) + anx7625_reg_write(ctx, ctx->i2c.tx_p1_client, + DP_TX_LANE1_SWING_REG0 + i, + ctx->pdata.lane1_reg_data[i] & 0xFF); +} + static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) { struct device *dev = &ctx->client->dev; @@ -1146,9 +1223,8 @@ static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on) } else { DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n"); anx7625_start_dp_work(ctx); + anx7625_dp_adjust_swing(ctx); } - - ctx->hpd_status = 1; } static int anx7625_hpd_change_detect(struct anx7625_data *ctx) @@ -1225,20 +1301,75 @@ static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data) return IRQ_HANDLED; } +static int anx7625_get_swing_setting(struct device *dev, + struct anx7625_platform_data *pdata) +{ + int num_regs; + + if (of_get_property(dev->of_node, + "analogix,lane0-swing", &num_regs)) { + if (num_regs > DP_TX_SWING_REG_CNT) + num_regs = DP_TX_SWING_REG_CNT; + + pdata->dp_lane0_swing_reg_cnt = num_regs; + of_property_read_u32_array(dev->of_node, "analogix,lane0-swing", + pdata->lane0_reg_data, num_regs); + } + + if (of_get_property(dev->of_node, + "analogix,lane1-swing", &num_regs)) { + if (num_regs > DP_TX_SWING_REG_CNT) + num_regs = DP_TX_SWING_REG_CNT; + + pdata->dp_lane1_swing_reg_cnt = num_regs; + of_property_read_u32_array(dev->of_node, "analogix,lane1-swing", + pdata->lane1_reg_data, num_regs); + } + + return 0; +} + static int anx7625_parse_dt(struct device *dev, struct anx7625_platform_data *pdata) { - struct device_node *np = dev->of_node; + struct device_node *np = dev->of_node, *ep0; struct drm_panel *panel; int ret; + int bus_type, mipi_lanes; + + anx7625_get_swing_setting(dev, pdata); + pdata->is_dpi = 1; /* default dpi mode */ pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0); if (!pdata->mipi_host_node) { DRM_DEV_ERROR(dev, "fail to get internal panel.\n"); return -ENODEV; } - DRM_DEV_DEBUG_DRIVER(dev, "found dsi host node.\n"); + bus_type = V4L2_FWNODE_BUS_TYPE_PARALLEL; + mipi_lanes = MAX_LANES_SUPPORT; + ep0 = of_graph_get_endpoint_by_regs(np, 0, 0); + if (ep0) { + if (of_property_read_u32(ep0, "bus-type", &bus_type)) + bus_type = 0; + + mipi_lanes = of_property_count_u32_elems(ep0, "data-lanes"); + } + + if (bus_type == V4L2_FWNODE_BUS_TYPE_PARALLEL) /* bus type is Parallel(DSI) */ + pdata->is_dpi = 0; + + pdata->mipi_lanes = mipi_lanes; + if (pdata->mipi_lanes > MAX_LANES_SUPPORT || pdata->mipi_lanes <= 0) + pdata->mipi_lanes = MAX_LANES_SUPPORT; + + if (pdata->is_dpi) + DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DPI host node.\n"); + else + DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DSI host node.\n"); + + if (of_property_read_bool(np, "analogix,audio-enable")) + pdata->audio_en = 1; ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL); if (ret < 0) { @@ -1301,9 +1432,215 @@ static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx) { struct device *dev = &ctx->client->dev; - DRM_DEV_DEBUG_DRIVER(dev, "sink detect, return connected\n"); + DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n"); - return connector_status_connected; + if (ctx->pdata.panel_bridge) + return connector_status_connected; + + return ctx->hpd_status ? 
connector_status_connected : + connector_status_disconnected; +} + +static int anx7625_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *fmt, + struct hdmi_codec_params *params) +{ + struct anx7625_data *ctx = dev_get_drvdata(dev); + int wl, ch, rate; + int ret = 0; + + if (fmt->fmt != HDMI_DSP_A) { + DRM_DEV_ERROR(dev, "only supports DSP_A\n"); + return -EINVAL; + } + + DRM_DEV_DEBUG_DRIVER(dev, "setting %d Hz, %d bit, %d channels\n", + params->sample_rate, params->sample_width, + params->cea.channels); + + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, + ~I2S_SLAVE_MODE, + TDM_SLAVE_MODE); + + /* Word length */ + switch (params->sample_width) { + case 16: + wl = AUDIO_W_LEN_16_20MAX; + break; + case 18: + wl = AUDIO_W_LEN_18_20MAX; + break; + case 20: + wl = AUDIO_W_LEN_20_20MAX; + break; + case 24: + wl = AUDIO_W_LEN_24_24MAX; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "wordlength: %d bit not support", + params->sample_width); + return -EINVAL; + } + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_5, + 0xf0, wl); + + /* Channel num */ + switch (params->cea.channels) { + case 2: + ch = I2S_CH_2; + break; + case 4: + ch = TDM_CH_4; + break; + case 6: + ch = TDM_CH_6; + break; + case 8: + ch = TDM_CH_8; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not support", + params->cea.channels); + return -EINVAL; + } + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, 0x1f, ch << 5); + if (ch > I2S_CH_2) + ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, AUDIO_LAYOUT); + else + ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_6, ~AUDIO_LAYOUT); + + /* FS */ + switch (params->sample_rate) { + case 32000: + rate = AUDIO_FS_32K; + break; + case 44100: + rate = AUDIO_FS_441K; + break; + case 48000: + rate = AUDIO_FS_48K; + break; + case 88200: + rate = AUDIO_FS_882K; + break; + case 96000: + rate = AUDIO_FS_96K; + break; + case 176400: + rate = AUDIO_FS_1764K; + break; + case 192000: + rate = AUDIO_FS_192K; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d not support", + params->sample_rate); + return -EINVAL; + } + ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client, + AUDIO_CHANNEL_STATUS_4, + 0xf0, rate); + ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, + AP_AV_STATUS, AP_AUDIO_CHG); + if (ret < 0) { + DRM_DEV_ERROR(dev, "IO error : config audio.\n"); + return -EIO; + } + + return 0; +} + +static void anx7625_audio_shutdown(struct device *dev, void *data) +{ + DRM_DEV_DEBUG_DRIVER(dev, "stop audio\n"); +} + +static int anx7625_hdmi_i2s_get_dai_id(struct snd_soc_component *component, + struct device_node *endpoint) +{ + struct of_endpoint of_ep; + int ret; + + ret = of_graph_parse_endpoint(endpoint, &of_ep); + if (ret < 0) + return ret; + + /* + * HDMI sound should be located at external DPI port + * Didn't have good way to check where is internal(DSI) + * or external(DPI) bridge + */ + return 0; +} + +static void +anx7625_audio_update_connector_status(struct anx7625_data *ctx, + enum drm_connector_status status) +{ + if (ctx->plugged_cb && ctx->codec_dev) { + ctx->plugged_cb(ctx->codec_dev, + status == connector_status_connected); + } +} + +static int anx7625_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct anx7625_data *ctx = data; + + ctx->plugged_cb = fn; + ctx->codec_dev = codec_dev; + 
anx7625_audio_update_connector_status(ctx, anx7625_sink_detect(ctx)); + + return 0; +} + +static const struct hdmi_codec_ops anx7625_codec_ops = { + .hw_params = anx7625_audio_hw_params, + .audio_shutdown = anx7625_audio_shutdown, + .get_dai_id = anx7625_hdmi_i2s_get_dai_id, + .hook_plugged_cb = anx7625_audio_hook_plugged_cb, +}; + +static void anx7625_unregister_audio(struct anx7625_data *ctx) +{ + struct device *dev = &ctx->client->dev; + + if (ctx->audio_pdev) { + platform_device_unregister(ctx->audio_pdev); + ctx->audio_pdev = NULL; + } + + DRM_DEV_DEBUG_DRIVER(dev, "unbound to %s", HDMI_CODEC_DRV_NAME); +} + +static int anx7625_register_audio(struct device *dev, struct anx7625_data *ctx) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &anx7625_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, + .data = ctx, + }; + + ctx->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + + if (IS_ERR(ctx->audio_pdev)) + return PTR_ERR(ctx->audio_pdev); + + DRM_DEV_DEBUG_DRIVER(dev, "bound to %s", HDMI_CODEC_DRV_NAME); + + return 0; } static int anx7625_attach_dsi(struct anx7625_data *ctx) @@ -1316,6 +1653,7 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) .channel = 0, .node = NULL, }; + int ret; DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n"); @@ -1325,22 +1663,22 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) return -EINVAL; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { DRM_DEV_ERROR(dev, "fail to create dsi device.\n"); return -EINVAL; } - dsi->lanes = 4; + dsi->lanes = ctx->pdata.mipi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; - if (mipi_dsi_attach(dsi) < 0) { + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret) { DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n"); - mipi_dsi_device_unregister(dsi); - return -EINVAL; + return ret; } ctx->dsi = dsi; @@ -1350,16 +1688,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx) return 0; } -static void anx7625_bridge_detach(struct drm_bridge *bridge) -{ - struct anx7625_data *ctx = bridge_to_anx7625(bridge); - - if (ctx->dsi) { - mipi_dsi_detach(ctx->dsi); - mipi_dsi_device_unregister(ctx->dsi); - } -} - static int anx7625_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { @@ -1376,12 +1704,6 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, return -ENODEV; } - err = anx7625_attach_dsi(ctx); - if (err) { - DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", err); - return err; - } - if (ctx->pdata.panel_bridge) { err = drm_bridge_attach(bridge->encoder, ctx->pdata.panel_bridge, @@ -1475,6 +1797,10 @@ static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge, DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n"); + /* No need fixup for external monitor */ + if (!ctx->pdata.panel_bridge) + return true; + hsync = mode->hsync_end - mode->hsync_start; hfp = mode->hsync_start - mode->hdisplay; hbp = mode->htotal - mode->hsync_end; @@ -1624,7 +1950,6 @@ static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge, static const struct drm_bridge_funcs anx7625_bridge_funcs = { .attach = anx7625_bridge_attach, - .detach = anx7625_bridge_detach, .disable = anx7625_bridge_disable, .mode_valid = anx7625_bridge_mode_valid, .mode_set = anx7625_bridge_mode_set, @@ -1851,14 +2176,39 @@ static int anx7625_i2c_probe(struct i2c_client 
*client, platform->bridge.funcs = &anx7625_bridge_funcs; platform->bridge.of_node = client->dev.of_node; - platform->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; - platform->bridge.type = DRM_MODE_CONNECTOR_eDP; + platform->bridge.ops = DRM_BRIDGE_OP_EDID; + if (!platform->pdata.panel_bridge) + platform->bridge.ops |= DRM_BRIDGE_OP_HPD | + DRM_BRIDGE_OP_DETECT; + platform->bridge.type = platform->pdata.panel_bridge ? + DRM_MODE_CONNECTOR_eDP : + DRM_MODE_CONNECTOR_DisplayPort; + drm_bridge_add(&platform->bridge); + if (!platform->pdata.is_dpi) { + ret = anx7625_attach_dsi(platform); + if (ret) { + DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", ret); + goto unregister_bridge; + } + } + + if (platform->pdata.audio_en) + anx7625_register_audio(dev, platform); + DRM_DEV_DEBUG_DRIVER(dev, "probe done\n"); return 0; +unregister_bridge: + drm_bridge_remove(&platform->bridge); + + if (!platform->pdata.low_power_mode) + pm_runtime_put_sync_suspend(&client->dev); + + anx7625_unregister_i2c_dummy_clients(platform); + free_wq: if (platform->workqueue) destroy_workqueue(platform->workqueue); @@ -1883,6 +2233,9 @@ static int anx7625_i2c_remove(struct i2c_client *client) anx7625_unregister_i2c_dummy_clients(platform); + if (platform->pdata.audio_en) + anx7625_unregister_audio(platform); + kfree(platform); return 0; } diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h index 6dcf64c703f9..3d79b6fb13c8 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.h +++ b/drivers/gpu/drm/bridge/analogix/anx7625.h @@ -111,6 +111,7 @@ #define AUDIO_CHANNEL_STATUS_6 0xd5 #define TDM_SLAVE_MODE 0x10 #define I2S_SLAVE_MODE 0x08 +#define AUDIO_LAYOUT 0x01 #define AUDIO_CONTROL_REGISTER 0xe6 #define TDM_TIMING_MODE 0x08 @@ -141,12 +142,20 @@ #define HORIZONTAL_BACK_PORCH_H 0x22 /* Bit[7:4] are reserved */ /******** END of I2C Address 0x72 *********/ + +/***************************************************************/ +/* Register definition of device address 0x7a */ +#define DP_TX_SWING_REG_CNT 0x14 +#define DP_TX_LANE0_SWING_REG0 0x00 +#define DP_TX_LANE1_SWING_REG0 0x14 +/******** END of I2C Address 0x7a *********/ + /***************************************************************/ /* Register definition of device address 0x7e */ #define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E -#define FLASH_LOAD_STA 0x05 +#define FLASH_LOAD_STA 0x05 #define FLASH_LOAD_STA_CHK BIT(7) #define XTAL_FRQ_SEL 0x3F @@ -349,12 +358,21 @@ struct s_edid_data { /***************** Display End *****************/ +#define MAX_LANES_SUPPORT 4 + struct anx7625_platform_data { struct gpio_desc *gpio_p_on; struct gpio_desc *gpio_reset; struct regulator_bulk_data supplies[3]; struct drm_bridge *panel_bridge; int intp_irq; + int is_dpi; + int mipi_lanes; + int audio_en; + int dp_lane0_swing_reg_cnt; + int lane0_reg_data[DP_TX_SWING_REG_CNT]; + int dp_lane1_swing_reg_cnt; + int lane1_reg_data[DP_TX_SWING_REG_CNT]; u32 low_power_mode; struct device_node *mipi_host_node; }; @@ -371,6 +389,7 @@ struct anx7625_i2c_client { struct anx7625_data { struct anx7625_platform_data pdata; + struct platform_device *audio_pdev; int hpd_status; int hpd_high_cnt; /* Lock for work queue */ @@ -379,6 +398,8 @@ struct anx7625_data { struct anx7625_i2c_client i2c; struct i2c_client *last_client; struct s_edid_data slimport_edid_p; + struct device *codec_dev; + hdmi_codec_plugged_cb plugged_cb; struct work_struct work; struct workqueue_struct *workqueue; char edid_block; diff --git a/drivers/gpu/drm/bridge/display-connector.c 
b/drivers/gpu/drm/bridge/display-connector.c index 05eb759da6fc..d24f5b90feab 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <linux/regulator/consumer.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -87,10 +88,95 @@ static struct edid *display_connector_get_edid(struct drm_bridge *bridge, return drm_get_edid(connector, conn->bridge.ddc); } +/* + * Since this bridge is tied to the connector, it acts like a passthrough, + * so concerning the output bus formats, either pass the bus formats from the + * previous bridge or return fallback data like done in the bridge function: + * drm_atomic_bridge_chain_select_bus_fmts(). + * This supports negotiation if the bridge chain has all bits in place. + */ +static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge_state *prev_bridge_state; + + if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) { + struct drm_connector *conn = conn_state->connector; + u32 *out_bus_fmts; + + *num_output_fmts = 1; + out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL); + if (!out_bus_fmts) + return NULL; + + if (conn->display_info.num_bus_formats && + conn->display_info.bus_formats) + out_bus_fmts[0] = conn->display_info.bus_formats[0]; + else + out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; + + return out_bus_fmts; + } + + prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, + prev_bridge); + + return prev_bridge->funcs->atomic_get_output_bus_fmts(prev_bridge, prev_bridge_state, + crtc_state, conn_state, + num_output_fmts); +} + +/* + * Since this bridge is tied to the connector, it acts like a passthrough, + * so concerning the input bus formats, either pass the bus formats from the + * previous bridge or MEDIA_BUS_FMT_FIXED (like select_bus_fmt_recursive()) + * when atomic_get_input_bus_fmts is not supported. + * This supports negotiation if the bridge chain has all bits in place. 
+ */ +static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge); + struct drm_bridge_state *prev_bridge_state; + + if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) { + u32 *in_bus_fmts; + + *num_input_fmts = 1; + in_bus_fmts = kmalloc(sizeof(*in_bus_fmts), GFP_KERNEL); + if (!in_bus_fmts) + return NULL; + + in_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; + + return in_bus_fmts; + } + + prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, + prev_bridge); + + return prev_bridge->funcs->atomic_get_input_bus_fmts(prev_bridge, prev_bridge_state, + crtc_state, conn_state, output_fmt, + num_input_fmts); +} + static const struct drm_bridge_funcs display_connector_bridge_funcs = { .attach = display_connector_attach, .detect = display_connector_detect, .get_edid = display_connector_get_edid, + .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts, + .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, }; static irqreturn_t display_connector_hpd_irq(int irq, void *arg) @@ -107,7 +193,7 @@ static int display_connector_probe(struct platform_device *pdev) { struct display_connector *conn; unsigned int type; - const char *label; + const char *label = NULL; int ret; conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 1b0c7eaf6c84..c642d1e02b2f 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -472,11 +472,11 @@ static int lt8912_attach_dsi(struct lt8912 *lt) return -EPROBE_DEFER; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { ret = PTR_ERR(dsi); dev_err(dev, "failed to create dsi device (%d)\n", ret); - goto err_dsi_device; + return ret; } lt->dsi = dsi; @@ -489,24 +489,13 @@ static int lt8912_attach_dsi(struct lt8912 *lt) MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; + return ret; } return 0; - -err_dsi_attach: - mipi_dsi_device_unregister(dsi); -err_dsi_device: - return ret; -} - -static void lt8912_detach_dsi(struct lt8912 *lt) -{ - mipi_dsi_detach(lt->dsi); - mipi_dsi_device_unregister(lt->dsi); } static int lt8912_bridge_connector_init(struct drm_bridge *bridge) @@ -555,10 +544,6 @@ static int lt8912_bridge_attach(struct drm_bridge *bridge, if (ret) goto error; - ret = lt8912_attach_dsi(lt); - if (ret) - goto error; - lt->is_attached = true; return 0; @@ -573,7 +558,6 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge) struct lt8912 *lt = bridge_to_lt8912(bridge); if (lt->is_attached) { - lt8912_detach_dsi(lt); lt8912_hard_power_off(lt); drm_connector_unregister(<->connector); drm_connector_cleanup(<->connector); @@ -718,8 +702,15 @@ static int lt8912_probe(struct i2c_client *client, drm_bridge_add(<->bridge); + ret = lt8912_attach_dsi(lt); + if (ret) + goto err_attach; + return 0; 
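Both this lt8912b hunk and the lontium-lt9611 changes that follow lean on the devm-managed MIPI DSI helpers: devm_mipi_dsi_device_register_full() and devm_mipi_dsi_attach() undo themselves when the registering device unbinds, which is what lets these bridges drop their .detach callbacks and the old error-unwind labels. A hedged sketch of the shared pattern; the example_attach_dsi() name, lane count, and mode flags are assumptions, not taken from any one driver:

#include <drm/drm_mipi_dsi.h>
#include <linux/err.h>

static int example_attach_dsi(struct device *dev, struct mipi_dsi_host *host)
{
	const struct mipi_dsi_device_info info = { "example", 0, NULL };
	struct mipi_dsi_device *dsi;
	int ret;

	/* Device is unregistered automatically when dev unbinds. */
	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

	/* Detach is likewise undone on unbind; no goto-based cleanup needed. */
	ret = devm_mipi_dsi_attach(dev, dsi);
	if (ret < 0)
		return ret;

	return 0;
}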
+err_attach: + drm_bridge_remove(<->bridge); + lt8912_free_i2c(lt); err_i2c: lt8912_put_dt(lt); err_dt_parse: diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 29b1ce2140ab..dafb1b47c15f 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -586,7 +586,7 @@ lt9611_connector_detect(struct drm_connector *connector, bool force) int connected = 0; regmap_read(lt9611->regmap, 0x825e, ®_val); - connected = (reg_val & BIT(2)); + connected = (reg_val & BIT(0)); lt9611->status = connected ? connector_status_connected : connector_status_disconnected; @@ -760,6 +760,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, const struct mipi_dsi_device_info info = { "lt9611", 0, NULL }; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; + struct device *dev = lt9611->dev; int ret; host = of_find_mipi_dsi_host_by_node(dsi_node); @@ -768,7 +769,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, return ERR_PTR(-EPROBE_DEFER); } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(lt9611->dev, "failed to create dsi device\n"); return dsi; @@ -779,29 +780,15 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { - dev_err(lt9611->dev, "failed to attach dsi to host\n"); - mipi_dsi_device_unregister(dsi); + dev_err(dev, "failed to attach dsi to host\n"); return ERR_PTR(ret); } return dsi; } -static void lt9611_bridge_detach(struct drm_bridge *bridge) -{ - struct lt9611 *lt9611 = bridge_to_lt9611(bridge); - - if (lt9611->dsi1) { - mipi_dsi_detach(lt9611->dsi1); - mipi_dsi_device_unregister(lt9611->dsi1); - } - - mipi_dsi_detach(lt9611->dsi0); - mipi_dsi_device_unregister(lt9611->dsi0); -} - static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt9611) { int ret; @@ -838,28 +825,7 @@ static int lt9611_bridge_attach(struct drm_bridge *bridge, return ret; } - /* Attach primary DSI */ - lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node); - if (IS_ERR(lt9611->dsi0)) - return PTR_ERR(lt9611->dsi0); - - /* Attach secondary DSI, if specified */ - if (lt9611->dsi1_node) { - lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node); - if (IS_ERR(lt9611->dsi1)) { - ret = PTR_ERR(lt9611->dsi1); - goto err_unregister_dsi0; - } - } - return 0; - -err_unregister_dsi0: - lt9611_bridge_detach(bridge); - drm_connector_cleanup(<9611->connector); - mipi_dsi_device_unregister(lt9611->dsi0); - - return ret; } static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, @@ -926,7 +892,7 @@ static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge) int connected; regmap_read(lt9611->regmap, 0x825e, ®_val); - connected = reg_val & BIT(2); + connected = reg_val & BIT(0); lt9611->status = connected ? 
connector_status_connected : connector_status_disconnected; @@ -952,7 +918,6 @@ static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge) static const struct drm_bridge_funcs lt9611_bridge_funcs = { .attach = lt9611_bridge_attach, - .detach = lt9611_bridge_detach, .mode_valid = lt9611_bridge_mode_valid, .enable = lt9611_bridge_enable, .disable = lt9611_bridge_disable, @@ -1181,10 +1146,29 @@ static int lt9611_probe(struct i2c_client *client, drm_bridge_add(<9611->bridge); + /* Attach primary DSI */ + lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node); + if (IS_ERR(lt9611->dsi0)) { + ret = PTR_ERR(lt9611->dsi0); + goto err_remove_bridge; + } + + /* Attach secondary DSI, if specified */ + if (lt9611->dsi1_node) { + lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node); + if (IS_ERR(lt9611->dsi1)) { + ret = PTR_ERR(lt9611->dsi1); + goto err_remove_bridge; + } + } + lt9611_enable_hpd_interrupts(lt9611); return lt9611_audio_init(dev, lt9611); +err_remove_bridge: + drm_bridge_remove(<9611->bridge); + err_disable_regulators: regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 010657ea7af7..33f9716da0ee 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -258,17 +258,18 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, const struct mipi_dsi_device_info info = { "lt9611uxc", 0, NULL }; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; + struct device *dev = lt9611uxc->dev; int ret; host = of_find_mipi_dsi_host_by_node(dsi_node); if (!host) { - dev_err(lt9611uxc->dev, "failed to find dsi host\n"); + dev_err(dev, "failed to find dsi host\n"); return ERR_PTR(-EPROBE_DEFER); } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { - dev_err(lt9611uxc->dev, "failed to create dsi device\n"); + dev_err(dev, "failed to create dsi device\n"); return dsi; } @@ -277,10 +278,9 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { - dev_err(lt9611uxc->dev, "failed to attach dsi to host\n"); - mipi_dsi_device_unregister(dsi); + dev_err(dev, "failed to attach dsi to host\n"); return ERR_PTR(ret); } @@ -355,19 +355,6 @@ static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc return drm_connector_attach_encoder(<9611uxc->connector, bridge->encoder); } -static void lt9611uxc_bridge_detach(struct drm_bridge *bridge) -{ - struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); - - if (lt9611uxc->dsi1) { - mipi_dsi_detach(lt9611uxc->dsi1); - mipi_dsi_device_unregister(lt9611uxc->dsi1); - } - - mipi_dsi_detach(lt9611uxc->dsi0); - mipi_dsi_device_unregister(lt9611uxc->dsi0); -} - static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { @@ -380,27 +367,7 @@ static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, return ret; } - /* Attach primary DSI */ - lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); - if (IS_ERR(lt9611uxc->dsi0)) - return PTR_ERR(lt9611uxc->dsi0); - - /* Attach secondary DSI, if specified */ - if (lt9611uxc->dsi1_node) { - lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, 
lt9611uxc->dsi1_node); - if (IS_ERR(lt9611uxc->dsi1)) { - ret = PTR_ERR(lt9611uxc->dsi1); - goto err_unregister_dsi0; - } - } - return 0; - -err_unregister_dsi0: - mipi_dsi_detach(lt9611uxc->dsi0); - mipi_dsi_device_unregister(lt9611uxc->dsi0); - - return ret; } static enum drm_mode_status @@ -544,7 +511,6 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { .attach = lt9611uxc_bridge_attach, - .detach = lt9611uxc_bridge_detach, .mode_valid = lt9611uxc_bridge_mode_valid, .mode_set = lt9611uxc_bridge_mode_set, .detect = lt9611uxc_bridge_detect, @@ -980,8 +946,27 @@ retry: drm_bridge_add(<9611uxc->bridge); + /* Attach primary DSI */ + lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); + if (IS_ERR(lt9611uxc->dsi0)) { + ret = PTR_ERR(lt9611uxc->dsi0); + goto err_remove_bridge; + } + + /* Attach secondary DSI, if specified */ + if (lt9611uxc->dsi1_node) { + lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node); + if (IS_ERR(lt9611uxc->dsi1)) { + ret = PTR_ERR(lt9611uxc->dsi1); + goto err_remove_bridge; + } + } + return lt9611uxc_audio_init(dev, lt9611uxc); +err_remove_bridge: + drm_bridge_remove(<9611uxc->bridge); + err_disable_regulators: regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index ad460b96c0a3..702ea803a743 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -14,12 +14,14 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> struct lvds_codec { struct device *dev; struct drm_bridge bridge; struct drm_bridge *panel_bridge; + struct drm_bridge_timings timings; struct regulator *vcc; struct gpio_desc *powerdown_gpio; u32 connector_type; @@ -118,7 +120,7 @@ static int lvds_codec_probe(struct platform_device *pdev) struct device_node *bus_node; struct drm_panel *panel; struct lvds_codec *lvds_codec; - const char *mapping; + u32 val; int ret; lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL); @@ -174,32 +176,38 @@ static int lvds_codec_probe(struct platform_device *pdev) return -ENXIO; } - ret = of_property_read_string(bus_node, "data-mapping", - &mapping); + ret = drm_of_lvds_get_data_mapping(bus_node); of_node_put(bus_node); - if (ret < 0) { + if (ret == -ENODEV) { dev_warn(dev, "missing 'data-mapping' DT property\n"); + } else if (ret) { + dev_err(dev, "invalid 'data-mapping' DT property\n"); + return ret; } else { - if (!strcmp(mapping, "jeida-18")) { - lvds_codec->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; - } else if (!strcmp(mapping, "jeida-24")) { - lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; - } else if (!strcmp(mapping, "vesa-24")) { - lvds_codec->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; - } else { - dev_err(dev, "invalid 'data-mapping' DT property\n"); - return -EINVAL; - } + lvds_codec->bus_format = ret; lvds_codec->bridge.funcs = &funcs_decoder; } } /* + * Encoder might sample data on different clock edge than the display, + * for example OnSemi FIN3385 has a dedicated strapping pin to select + * the sampling edge. + */ + if (lvds_codec->connector_type == DRM_MODE_CONNECTOR_LVDS && + !of_property_read_u32(dev->of_node, "pclk-sample", &val)) { + lvds_codec->timings.input_bus_flags = val ? 
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE : + DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE; + } + + /* * The panel_bridge bridge is attached to the panel's of_node, * but we need a bridge attached to our of_node for our user * to look up. */ lvds_codec->bridge.of_node = dev->of_node; + lvds_codec->bridge.timings = &lvds_codec->timings; drm_bridge_add(&lvds_codec->bridge); platform_set_drvdata(pdev, lvds_codec); diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index d2808c4a6fb1..cce98bf2a4e7 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -306,19 +306,10 @@ out: mutex_unlock(&ge_b850v3_lvds_dev_mutex); } -static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, - const struct i2c_device_id *id) +static int ge_b850v3_register(void) { + struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c; struct device *dev = &stdp4028_i2c->dev; - int ret; - - ret = ge_b850v3_lvds_init(dev); - - if (ret) - return ret; - - ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; - i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); /* drm bridge initialization */ ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs; @@ -343,6 +334,27 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, "ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr); } +static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, + const struct i2c_device_id *id) +{ + struct device *dev = &stdp4028_i2c->dev; + int ret; + + ret = ge_b850v3_lvds_init(dev); + + if (ret) + return ret; + + ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; + i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); + + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp2690_i2c) + return 0; + + return ge_b850v3_register(); +} + static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c) { ge_b850v3_lvds_remove(); @@ -386,7 +398,11 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c, ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c; i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr); - return 0; + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp4028_i2c) + return 0; + + return ge_b850v3_register(); } static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c) diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index 3aaa90913bf8..818704bf5e86 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -9,10 +9,12 @@ #include <linux/i2c.h> #include <linux/module.h> #include <linux/of_graph.h> +#include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <drm/drm_bridge.h> +#include <drm/drm_dp_aux_bus.h> #include <drm/drm_dp_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> @@ -100,7 +102,7 @@ struct ps8640 { struct regulator_bulk_data supplies[2]; struct gpio_desc *gpio_reset; struct gpio_desc *gpio_powerdown; - bool powered; + bool pre_enabled; }; static const struct regmap_config ps8640_regmap_config[] = { @@ -148,8 +150,46 @@ static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux) return container_of(aux, struct ps8640, aux); } -static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, - struct drm_dp_aux_msg *msg) +static bool ps8640_of_panel_on_aux_bus(struct device *dev) +{ + struct device_node *bus, 
*panel; + + bus = of_get_child_by_name(dev->of_node, "aux-bus"); + if (!bus) + return false; + + panel = of_get_child_by_name(bus, "panel"); + of_node_put(bus); + if (!panel) + return false; + of_node_put(panel); + + return true; +} + +static int ps8640_ensure_hpd(struct ps8640 *ps_bridge) +{ + struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; + struct device *dev = &ps_bridge->page[PAGE2_TOP_CNTL]->dev; + int status; + int ret; + + /* + * Apparently something about the firmware in the chip signals that + * HPD goes high by reporting GPIO9 as high (even though HPD isn't + * actually connected to GPIO9). + */ + ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status, + status & PS_GPIO9, 20 * 1000, 200 * 1000); + + if (ret < 0) + dev_warn(dev, "HPD didn't go high: %d\n", ret); + + return ret; +} + +static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) { struct ps8640 *ps_bridge = aux_to_ps8640(aux); struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL]; @@ -274,38 +314,49 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, return len; } -static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge, - const enum ps8640_vdo_control ctrl) +static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct ps8640 *ps_bridge = aux_to_ps8640(aux); + struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; + int ret; + + pm_runtime_get_sync(dev); + ret = ps8640_ensure_hpd(ps_bridge); + if (!ret) + ret = ps8640_aux_transfer_msg(aux, msg); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static void ps8640_bridge_vdo_control(struct ps8640 *ps_bridge, + const enum ps8640_vdo_control ctrl) { struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1]; + struct device *dev = &ps_bridge->page[PAGE3_DSI_CNTL1]->dev; u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl }; int ret; ret = regmap_bulk_write(map, PAGE3_SET_ADD, vdo_ctrl_buf, sizeof(vdo_ctrl_buf)); - if (ret < 0) { - DRM_ERROR("failed to %sable VDO: %d\n", - ctrl == ENABLE ? "en" : "dis", ret); - return ret; - } - - return 0; + if (ret < 0) + dev_err(dev, "failed to %sable VDO: %d\n", + ctrl == ENABLE ? "en" : "dis", ret); } -static void ps8640_bridge_poweron(struct ps8640 *ps_bridge) +static int __maybe_unused ps8640_resume(struct device *dev) { - struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; - int ret, status; - - if (ps_bridge->powered) - return; + struct ps8640 *ps_bridge = dev_get_drvdata(dev); + int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) { - DRM_ERROR("cannot enable regulators %d\n", ret); - return; + dev_err(dev, "cannot enable regulators %d\n", ret); + return ret; } gpiod_set_value(ps_bridge->gpio_powerdown, 0); @@ -314,86 +365,78 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge) gpiod_set_value(ps_bridge->gpio_reset, 0); /* - * Wait for the ps8640 embedded MCU to be ready - * First wait 200ms and then check the MCU ready flag every 20ms + * Mystery 200 ms delay for the "MCU to be ready". It's unclear if + * this is truly necessary since the MCU will already signal that + * things are "good to go" by signaling HPD on "gpio 9". See + * ps8640_ensure_hpd(). For now we'll keep this mystery delay just in + * case. 
*/ msleep(200); - ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status, - status & PS_GPIO9, 20 * 1000, 200 * 1000); - - if (ret < 0) { - DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", ret); - goto err_regulators_disable; - } - - msleep(50); - - /* - * The Manufacturer Command Set (MCS) is a device dependent interface - * intended for factory programming of the display module default - * parameters. Once the display module is configured, the MCS shall be - * disabled by the manufacturer. Once disabled, all MCS commands are - * ignored by the display interface. - */ - - ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0); - if (ret < 0) { - DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret); - goto err_regulators_disable; - } - - /* Switch access edp panel's edid through i2c */ - ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN); - if (ret < 0) { - DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret); - goto err_regulators_disable; - } - - ps_bridge->powered = true; - - return; - -err_regulators_disable: - regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies), - ps_bridge->supplies); + return 0; } -static void ps8640_bridge_poweroff(struct ps8640 *ps_bridge) +static int __maybe_unused ps8640_suspend(struct device *dev) { + struct ps8640 *ps_bridge = dev_get_drvdata(dev); int ret; - if (!ps_bridge->powered) - return; - gpiod_set_value(ps_bridge->gpio_reset, 1); gpiod_set_value(ps_bridge->gpio_powerdown, 1); ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) - DRM_ERROR("cannot disable regulators %d\n", ret); + dev_err(dev, "cannot disable regulators %d\n", ret); - ps_bridge->powered = false; + return ret; } +static const struct dev_pm_ops ps8640_pm_ops = { + SET_RUNTIME_PM_OPS(ps8640_suspend, ps8640_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) +}; + static void ps8640_pre_enable(struct drm_bridge *bridge) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); + struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; + struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; int ret; - ps8640_bridge_poweron(ps_bridge); + pm_runtime_get_sync(dev); + ps8640_ensure_hpd(ps_bridge); - ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE); + /* + * The Manufacturer Command Set (MCS) is a device dependent interface + * intended for factory programming of the display module default + * parameters. Once the display module is configured, the MCS shall be + * disabled by the manufacturer. Once disabled, all MCS commands are + * ignored by the display interface. 
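+	 * Clearing MCS_EN below simply restores that expected post-factory
+	 * state; note that a failure is only dev_warn()ed about, presumably
+	 * because VDO can still be enabled afterwards.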
+	 */
+
+	ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0);
 	if (ret < 0)
-		ps8640_bridge_poweroff(ps_bridge);
+		dev_warn(dev, "failed write PAGE2_MCS_EN: %d\n", ret);
+
+	/* Switch access edp panel's edid through i2c */
+	ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
+	if (ret < 0)
+		dev_warn(dev, "failed write PAGE2_I2C_BYPASS: %d\n", ret);
+
+	ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+
+	ps_bridge->pre_enabled = true;
 }
 
 static void ps8640_post_disable(struct drm_bridge *bridge)
 {
 	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
 
+	ps_bridge->pre_enabled = false;
+
 	ps8640_bridge_vdo_control(ps_bridge, DISABLE);
-	ps8640_bridge_poweroff(ps_bridge);
+	pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
 }
 
 static int ps8640_bridge_attach(struct drm_bridge *bridge,
@@ -401,68 +444,21 @@
 {
 	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
 	struct device *dev = &ps_bridge->page[0]->dev;
-	struct device_node *in_ep, *dsi_node;
-	struct mipi_dsi_device *dsi;
-	struct mipi_dsi_host *host;
 	int ret;
-	const struct mipi_dsi_device_info info = { .type = "ps8640",
-						   .channel = 0,
-						   .node = NULL,
-						 };
 
 	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
 		return -EINVAL;
 
-	/* port@0 is ps8640 dsi input port */
-	in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
-	if (!in_ep)
-		return -ENODEV;
-
-	dsi_node = of_graph_get_remote_port_parent(in_ep);
-	of_node_put(in_ep);
-	if (!dsi_node)
-		return -ENODEV;
-
-	host = of_find_mipi_dsi_host_by_node(dsi_node);
-	of_node_put(dsi_node);
-	if (!host)
-		return -ENODEV;
-
-	dsi = mipi_dsi_device_register_full(host, &info);
-	if (IS_ERR(dsi)) {
-		dev_err(dev, "failed to create dsi device\n");
-		ret = PTR_ERR(dsi);
-		return ret;
-	}
-
-	ps_bridge->dsi = dsi;
-
-	dsi->host = host;
-	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
-			  MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
-	dsi->format = MIPI_DSI_FMT_RGB888;
-	dsi->lanes = NUM_MIPI_LANES;
-	ret = mipi_dsi_attach(dsi);
-	if (ret) {
-		dev_err(dev, "failed to attach dsi device: %d\n", ret);
-		goto err_dsi_attach;
-	}
-
+	ps_bridge->aux.drm_dev = bridge->dev;
 	ret = drm_dp_aux_register(&ps_bridge->aux);
 	if (ret) {
 		dev_err(dev, "failed to register DP AUX channel: %d\n", ret);
-		goto err_aux_register;
+		return ret;
 	}
 
 	/* Attach the panel-bridge to the dsi bridge */
 	return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
 				 &ps_bridge->bridge, flags);
-
-err_aux_register:
-	mipi_dsi_detach(dsi);
-err_dsi_attach:
-	mipi_dsi_device_unregister(dsi);
-	return ret;
 }
 
 static void ps8640_bridge_detach(struct drm_bridge *bridge)
@@ -474,7 +470,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
 					   struct drm_connector *connector)
 {
 	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
-	bool poweroff = !ps_bridge->powered;
+	bool poweroff = !ps_bridge->pre_enabled;
 	struct edid *edid;
 
 	/*
@@ -504,6 +500,12 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
 	return edid;
 }
 
+static void ps8640_runtime_disable(void *data)
+{
+	pm_runtime_dont_use_autosuspend(data);
+	pm_runtime_disable(data);
+}
+
 static const struct drm_bridge_funcs ps8640_bridge_funcs = {
 	.attach = ps8640_bridge_attach,
 	.detach = ps8640_bridge_detach,
@@ -512,6 +514,53 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = {
+static int ps8640_bridge_host_attach(struct device *dev, struct ps8640 *ps_bridge)
+{
+	struct device_node *in_ep, *dsi_node;
+	struct mipi_dsi_device *dsi;
+	struct mipi_dsi_host
*host; + int ret; + const struct mipi_dsi_device_info info = { .type = "ps8640", + .channel = 0, + .node = NULL, + }; + + /* port@0 is ps8640 dsi input port */ + in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); + if (!in_ep) + return -ENODEV; + + dsi_node = of_graph_get_remote_port_parent(in_ep); + of_node_put(in_ep); + if (!dsi_node) + return -ENODEV; + + host = of_find_mipi_dsi_host_by_node(dsi_node); + of_node_put(dsi_node); + if (!host) + return -EPROBE_DEFER; + + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) { + dev_err(dev, "failed to create dsi device\n"); + return PTR_ERR(dsi); + } + + ps_bridge->dsi = dsi; + + dsi->host = host; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | + MIPI_DSI_MODE_VIDEO_SYNC_PULSE; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->lanes = NUM_MIPI_LANES; + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret) + return ret; + + return 0; +} + static int ps8640_probe(struct i2c_client *client) { struct device *dev = &client->dev; @@ -525,17 +574,6 @@ static int ps8640_probe(struct i2c_client *client) if (!ps_bridge) return -ENOMEM; - /* port@1 is ps8640 output port */ - ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL); - if (ret < 0) - return ret; - if (!panel) - return -ENODEV; - - ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel); - if (IS_ERR(ps_bridge->panel_bridge)) - return PTR_ERR(ps_bridge->panel_bridge); - ps_bridge->supplies[0].supply = "vdd33"; ps_bridge->supplies[1].supply = "vdd12"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies), @@ -558,9 +596,16 @@ static int ps8640_probe(struct i2c_client *client) ps_bridge->bridge.funcs = &ps8640_bridge_funcs; ps_bridge->bridge.of_node = dev->of_node; - ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID; ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP; + /* + * In the device tree, if panel is listed under aux-bus of the bridge + * node, panel driver should be able to retrieve EDID by itself using + * aux-bus. So let's not set DRM_BRIDGE_OP_EDID here. + */ + if (!ps8640_of_panel_on_aux_bus(&client->dev)) + ps_bridge->bridge.ops = DRM_BRIDGE_OP_EDID; + ps_bridge->page[PAGE0_DP_CNTL] = client; ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config); @@ -587,9 +632,46 @@ static int ps8640_probe(struct i2c_client *client) ps_bridge->aux.transfer = ps8640_aux_transfer; drm_dp_aux_init(&ps_bridge->aux); + pm_runtime_enable(dev); + /* + * Powering on ps8640 takes ~300ms. To avoid wasting time on power + * cycling ps8640 too often, set autosuspend_delay to 1000ms to ensure + * the bridge wouldn't suspend in between each _aux_transfer_msg() call + * during EDID read (~20ms in my experiment) and in between the last + * _aux_transfer_msg() call during EDID read and the _pre_enable() call + * (~100ms in my experiment). 
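+	 *
+	 * Practically, this means the bridge only powers down once the AUX
+	 * channel has been idle for a full second.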
+	 */
+	pm_runtime_set_autosuspend_delay(dev, 1000);
+	pm_runtime_use_autosuspend(dev);
+	pm_suspend_ignore_children(dev, true);
+	ret = devm_add_action_or_reset(dev, ps8640_runtime_disable, dev);
+	if (ret)
+		return ret;
+
+	devm_of_dp_aux_populate_ep_devices(&ps_bridge->aux);
+
+	/* port@1 is ps8640 output port */
+	ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+	if (ret < 0)
+		return ret;
+	if (!panel)
+		return -ENODEV;
+
+	ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+	if (IS_ERR(ps_bridge->panel_bridge))
+		return PTR_ERR(ps_bridge->panel_bridge);
+
 	drm_bridge_add(&ps_bridge->bridge);
 
+	ret = ps8640_bridge_host_attach(dev, ps_bridge);
+	if (ret)
+		goto err_bridge_remove;
+
 	return 0;
+
+err_bridge_remove:
+	drm_bridge_remove(&ps_bridge->bridge);
+	return ret;
 }
 
 static int ps8640_remove(struct i2c_client *client)
@@ -613,6 +695,7 @@ static struct i2c_driver ps8640_driver = {
 	.driver = {
 		.name = "ps8640",
 		.of_match_table = ps8640_match,
+		.pm = &ps8640_pm_ops,
 	},
 };
 
 module_i2c_driver(ps8640_driver);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index d0db1acf11d7..7d2ed0ed2fe2 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -320,13 +320,17 @@ static int dw_hdmi_open(struct snd_pcm_substream *substream)
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_dw_hdmi *dw = substream->private_data;
 	void __iomem *base = dw->data.base;
+	u8 *eld;
 	int ret;
 
 	runtime->hw = dw_hdmi_hw;
 
-	ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld);
-	if (ret < 0)
-		return ret;
+	eld = dw->data.get_eld(dw->data.hdmi);
+	if (eld) {
+		ret = snd_pcm_hw_constraint_eld(runtime, eld);
+		if (ret < 0)
+			return ret;
+	}
 
 	ret = snd_pcm_limit_hw_rates(runtime);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
index cb07dc0da5a7..f72d27208ebe 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
@@ -9,15 +9,15 @@ struct dw_hdmi_audio_data {
 	void __iomem *base;
 	int irq;
 	struct dw_hdmi *hdmi;
-	u8 *eld;
+	u8 *(*get_eld)(struct dw_hdmi *hdmi);
 };
 
 struct dw_hdmi_i2s_audio_data {
 	struct dw_hdmi *hdmi;
-	u8 *eld;
 
 	void (*write)(struct dw_hdmi *hdmi, u8 val, int offset);
 	u8 (*read)(struct dw_hdmi *hdmi, int offset);
+	u8 *(*get_eld)(struct dw_hdmi *hdmi);
 };
 
 #endif
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index feb04f127b55..f50b47ac11a8 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -135,8 +135,15 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
 			       size_t len)
 {
 	struct dw_hdmi_i2s_audio_data *audio = data;
+	u8 *eld;
+
+	eld = audio->get_eld(audio->hdmi);
+	if (eld)
+		memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
+	else
+		/* Pass an empty ELD if connector not available */
+		memset(buf, 0, len);
 
-	memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index f08d0fded61f..54d8fdad395f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -757,6 +757,14 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable)
 	hdmi_writeb(hdmi,
hdmi->mc_clkdis, HDMI_MC_CLKDIS); } +static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi) +{ + if (!hdmi->curr_conn) + return NULL; + + return hdmi->curr_conn->eld; +} + static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); @@ -3413,6 +3421,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, hdmi->bridge.funcs = &dw_hdmi_bridge_funcs; hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; + hdmi->bridge.interlace_allowed = true; #ifdef CONFIG_OF hdmi->bridge.of_node = pdev->dev.of_node; #endif @@ -3431,7 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, audio.base = hdmi->regs; audio.irq = irq; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; hdmi->enable_audio = dw_hdmi_ahb_audio_enable; hdmi->disable_audio = dw_hdmi_ahb_audio_disable; @@ -3444,7 +3453,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, struct dw_hdmi_i2s_audio_data audio; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; audio.write = hdmi_writeb; audio.read = hdmi_readb; hdmi->enable_audio = dw_hdmi_i2s_audio_enable; diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index a3db532bbdd1..fd585bf925fe 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -237,6 +237,10 @@ static void tc358768_hw_enable(struct tc358768_priv *priv) if (priv->enabled) return; + ret = clk_prepare_enable(priv->refclk); + if (ret < 0) + dev_err(priv->dev, "error enabling refclk (%d)\n", ret); + ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies); if (ret < 0) dev_err(priv->dev, "error enabling regulators (%d)\n", ret); @@ -274,6 +278,8 @@ static void tc358768_hw_disable(struct tc358768_priv *priv) if (ret < 0) dev_err(priv->dev, "error disabling regulators (%d)\n", ret); + clk_disable_unprepare(priv->refclk); + priv->enabled = false; } @@ -625,12 +631,19 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); struct mipi_dsi_device *dsi_dev = priv->output.dev; + unsigned long mode_flags = dsi_dev->mode_flags; u32 val, val2, lptxcnt, hact, data_type; const struct drm_display_mode *mode; u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk; - u32 dsiclk, dsibclk; + u32 dsiclk, dsibclk, video_start; + const u32 internal_delay = 40; int ret, i; + if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { + dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n"); + mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS; + } + tc358768_hw_enable(priv); ret = tc358768_sw_reset(priv); @@ -657,23 +670,27 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) case MIPI_DSI_FMT_RGB888: val |= (0x3 << 4); hact = mode->hdisplay * 3; + video_start = (mode->htotal - mode->hsync_start) * 3; data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; break; case MIPI_DSI_FMT_RGB666: val |= (0x4 << 4); hact = mode->hdisplay * 3; + video_start = (mode->htotal - mode->hsync_start) * 3; data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18; break; case MIPI_DSI_FMT_RGB666_PACKED: val |= (0x4 << 4) | BIT(3); hact = mode->hdisplay * 18 / 8; + video_start = (mode->htotal - mode->hsync_start) * 18 / 8; data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18; break; case MIPI_DSI_FMT_RGB565: val |= (0x5 << 4); hact = mode->hdisplay * 2; + video_start = (mode->htotal - mode->hsync_start) * 2; data_type = 
MIPI_DSI_PACKED_PIXEL_STREAM_16; break; default: @@ -684,7 +701,8 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) } /* VSDly[9:0] */ - tc358768_write(priv, TC358768_VSDLY, 1); + video_start = max(video_start, internal_delay + 1) - internal_delay; + tc358768_write(priv, TC358768_VSDLY, video_start); tc358768_write(priv, TC358768_DATAFMT, val); tc358768_write(priv, TC358768_DSITX_DT, data_type); @@ -764,7 +782,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) val |= BIT(i + 1); tc358768_write(priv, TC358768_HSTXVREGEN, val); - if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) + if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1); /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */ @@ -772,31 +790,61 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1; val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk), dsibclk_nsk) - 2; - val |= val2 << 16; + val = val << 16 | val2; dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val); tc358768_write(priv, TC358768_BTACNTRL1, val); /* START[0] */ tc358768_write(priv, TC358768_STARTCNTRL, 1); - /* Set event mode */ - tc358768_write(priv, TC358768_DSI_EVENT, 1); - - /* vsw (+ vbp) */ - tc358768_write(priv, TC358768_DSI_VSW, - mode->vtotal - mode->vsync_start); - /* vbp (not used in event mode) */ - tc358768_write(priv, TC358768_DSI_VBPR, 0); - /* vact */ - tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); - - /* (hsw + hbp) * byteclk * ndl / pclk */ - val = (u32)div_u64((mode->htotal - mode->hsync_start) * - ((u64)priv->dsiclk / 4) * priv->dsi_lanes, - mode->clock * 1000); - tc358768_write(priv, TC358768_DSI_HSW, val); - /* hbp (not used in event mode) */ - tc358768_write(priv, TC358768_DSI_HBPR, 0); + if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { + /* Set pulse mode */ + tc358768_write(priv, TC358768_DSI_EVENT, 0); + + /* vact */ + tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); + + /* vsw */ + tc358768_write(priv, TC358768_DSI_VSW, + mode->vsync_end - mode->vsync_start); + /* vbp */ + tc358768_write(priv, TC358768_DSI_VBPR, + mode->vtotal - mode->vsync_end); + + /* hsw * byteclk * ndl / pclk */ + val = (u32)div_u64((mode->hsync_end - mode->hsync_start) * + ((u64)priv->dsiclk / 4) * priv->dsi_lanes, + mode->clock * 1000); + tc358768_write(priv, TC358768_DSI_HSW, val); + + /* hbp * byteclk * ndl / pclk */ + val = (u32)div_u64((mode->htotal - mode->hsync_end) * + ((u64)priv->dsiclk / 4) * priv->dsi_lanes, + mode->clock * 1000); + tc358768_write(priv, TC358768_DSI_HBPR, val); + } else { + /* Set event mode */ + tc358768_write(priv, TC358768_DSI_EVENT, 1); + + /* vact */ + tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay); + + /* vsw (+ vbp) */ + tc358768_write(priv, TC358768_DSI_VSW, + mode->vtotal - mode->vsync_start); + /* vbp (not used in event mode) */ + tc358768_write(priv, TC358768_DSI_VBPR, 0); + + /* (hsw + hbp) * byteclk * ndl / pclk */ + val = (u32)div_u64((mode->htotal - mode->hsync_start) * + ((u64)priv->dsiclk / 4) * priv->dsi_lanes, + mode->clock * 1000); + tc358768_write(priv, TC358768_DSI_HSW, val); + + /* hbp (not used in event mode) */ + tc358768_write(priv, TC358768_DSI_HBPR, 0); + } + /* hact (bytes) */ tc358768_write(priv, TC358768_DSI_HACT, hact); @@ -822,7 +870,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM)) val |= TC358768_DSI_CONTROL_TXMD; - if (!(dsi_dev->mode_flags & 
MIPI_DSI_CLOCK_NON_CONTINUOUS)) + if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) val |= TC358768_DSI_CONTROL_HSCKMD; if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET) diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index 2272adcc5b4a..2c76331b251d 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -594,11 +594,26 @@ static int tc_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); + + /* Attach the panel-bridge to the dsi bridge */ + return drm_bridge_attach(bridge->encoder, tc->panel_bridge, + &tc->bridge, flags); +} + +static const struct drm_bridge_funcs tc_bridge_funcs = { + .attach = tc_bridge_attach, + .pre_enable = tc_bridge_pre_enable, + .enable = tc_bridge_enable, + .mode_valid = tc_mode_valid, + .post_disable = tc_bridge_post_disable, +}; + +static int tc_attach_host(struct tc_data *tc) +{ struct device *dev = &tc->i2c->dev; struct mipi_dsi_host *host; struct mipi_dsi_device *dsi; int ret; - const struct mipi_dsi_device_info info = { .type = "tc358775", .channel = 0, .node = NULL, @@ -610,11 +625,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge, return -EPROBE_DEFER; } - dsi = mipi_dsi_device_register_full(host, &info); + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(dev, "failed to create dsi device\n"); - ret = PTR_ERR(dsi); - goto err_dsi_device; + return PTR_ERR(dsi); } tc->dsi = dsi; @@ -623,29 +637,15 @@ static int tc_bridge_attach(struct drm_bridge *bridge, dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO; - ret = mipi_dsi_attach(dsi); + ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; + return ret; } - /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, tc->panel_bridge, - &tc->bridge, flags); -err_dsi_attach: - mipi_dsi_device_unregister(dsi); -err_dsi_device: - return ret; + return 0; } -static const struct drm_bridge_funcs tc_bridge_funcs = { - .attach = tc_bridge_attach, - .pre_enable = tc_bridge_pre_enable, - .enable = tc_bridge_enable, - .mode_valid = tc_mode_valid, - .post_disable = tc_bridge_post_disable, -}; - static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; @@ -709,7 +709,15 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) i2c_set_clientdata(client, tc); + ret = tc_attach_host(tc); + if (ret) + goto err_bridge_remove; + return 0; + +err_bridge_remove: + drm_bridge_remove(&tc->bridge); + return ret; } static int tc_remove(struct i2c_client *client) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index ba1160ec6d6e..945f08de45f1 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -245,47 +245,9 @@ static int sn65dsi83_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - struct device *dev = ctx->dev; - struct mipi_dsi_device *dsi; - struct mipi_dsi_host *host; - int ret = 0; - - const struct mipi_dsi_device_info info = { - .type = "sn65dsi83", - .channel = 0, - .node = NULL, - }; - - host = of_find_mipi_dsi_host_by_node(ctx->host_node); - if (!host) { - dev_err(dev, "failed to find dsi host\n"); - return -EPROBE_DEFER; - } - - dsi = mipi_dsi_device_register_full(host, 
&info); - if (IS_ERR(dsi)) { - return dev_err_probe(dev, PTR_ERR(dsi), - "failed to create dsi device\n"); - } - - ctx->dsi = dsi; - - dsi->lanes = ctx->dsi_lanes; - dsi->format = MIPI_DSI_FMT_RGB888; - dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST; - - ret = mipi_dsi_attach(dsi); - if (ret < 0) { - dev_err(dev, "failed to attach dsi to host\n"); - goto err_dsi_attach; - } return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, &ctx->bridge, flags); - -err_dsi_attach: - mipi_dsi_device_unregister(dsi); - return ret; } static void sn65dsi83_detach(struct drm_bridge *bridge) @@ -295,28 +257,9 @@ static void sn65dsi83_detach(struct drm_bridge *bridge) if (!ctx->dsi) return; - mipi_dsi_detach(ctx->dsi); - mipi_dsi_device_unregister(ctx->dsi); - drm_bridge_remove(&ctx->bridge); ctx->dsi = NULL; } -static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - - /* - * Reset the chip, pull EN line low for t_reset=10ms, - * then high for t_en=1ms. - */ - regcache_mark_dirty(ctx->regmap); - gpiod_set_value(ctx->enable_gpio, 0); - usleep_range(10000, 11000); - gpiod_set_value(ctx->enable_gpio, 1); - usleep_range(1000, 1100); -} - static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx, const struct drm_display_mode *mode) { @@ -394,6 +337,10 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge, u16 val; int ret; + /* Deassert reset */ + gpiod_set_value(ctx->enable_gpio, 1); + usleep_range(1000, 1100); + /* Get the LVDS format from the bridge state. */ bridge_state = drm_atomic_get_new_bridge_state(state, bridge); @@ -540,18 +487,11 @@ static void sn65dsi83_atomic_disable(struct drm_bridge *bridge, { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - /* Clear reset, disable PLL */ - regmap_write(ctx->regmap, REG_RC_RESET, 0x00); - regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00); -} - -static void sn65dsi83_atomic_post_disable(struct drm_bridge *bridge, - struct drm_bridge_state *old_bridge_state) -{ - struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - - /* Put the chip in reset, pull EN line low. */ + /* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. 
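+	 * The regmap cache is marked dirty below since the chip presumably
+	 * loses its register state while held in reset (the removed
+	 * atomic_pre_enable documented this sequence as t_reset = 10 ms low,
+	 * then t_en = 1 ms high before register access).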
*/ gpiod_set_value(ctx->enable_gpio, 0); + usleep_range(10000, 11000); + + regcache_mark_dirty(ctx->regmap); } static enum drm_mode_status @@ -597,10 +537,8 @@ sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge, static const struct drm_bridge_funcs sn65dsi83_funcs = { .attach = sn65dsi83_attach, .detach = sn65dsi83_detach, - .atomic_pre_enable = sn65dsi83_atomic_pre_enable, .atomic_enable = sn65dsi83_atomic_enable, .atomic_disable = sn65dsi83_atomic_disable, - .atomic_post_disable = sn65dsi83_atomic_post_disable, .mode_valid = sn65dsi83_mode_valid, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, @@ -664,6 +602,44 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) return 0; } +static int sn65dsi83_host_attach(struct sn65dsi83 *ctx) +{ + struct device *dev = ctx->dev; + struct mipi_dsi_device *dsi; + struct mipi_dsi_host *host; + const struct mipi_dsi_device_info info = { + .type = "sn65dsi83", + .channel = 0, + .node = NULL, + }; + int ret; + + host = of_find_mipi_dsi_host_by_node(ctx->host_node); + if (!host) { + dev_err(dev, "failed to find dsi host\n"); + return -EPROBE_DEFER; + } + + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) + return dev_err_probe(dev, PTR_ERR(dsi), + "failed to create dsi device\n"); + + ctx->dsi = dsi; + + dsi->lanes = ctx->dsi_lanes; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST; + + ret = devm_mipi_dsi_attach(dev, dsi); + if (ret < 0) { + dev_err(dev, "failed to attach dsi to host: %d\n", ret); + return ret; + } + + return 0; +} + static int sn65dsi83_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -685,10 +661,13 @@ static int sn65dsi83_probe(struct i2c_client *client, model = id->driver_data; } + /* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. 
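+	 * Requesting the GPIO with GPIOD_OUT_LOW below asserts reset already
+	 * at request time; the usleep_range() that follows provides the
+	 * required 10 ms of reset-low time before the chip is touched.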
*/ ctx->enable_gpio = devm_gpiod_get(ctx->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(ctx->enable_gpio)) return PTR_ERR(ctx->enable_gpio); + usleep_range(10000, 11000); + ret = sn65dsi83_parse_dt(ctx, model); if (ret) return ret; @@ -704,13 +683,22 @@ static int sn65dsi83_probe(struct i2c_client *client, ctx->bridge.of_node = dev->of_node; drm_bridge_add(&ctx->bridge); + ret = sn65dsi83_host_attach(ctx); + if (ret) + goto err_remove_bridge; + return 0; + +err_remove_bridge: + drm_bridge_remove(&ctx->bridge); + return ret; } static int sn65dsi83_remove(struct i2c_client *client) { struct sn65dsi83 *ctx = i2c_get_clientdata(client); + drm_bridge_remove(&ctx->bridge); of_node_put(ctx->host_node); return 0; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 6154bed0af5b..dab8f76618f3 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -4,7 +4,9 @@ * datasheet: https://www.ti.com/lit/ds/symlink/sn65dsi86.pdf */ +#include <linux/atomic.h> #include <linux/auxiliary_bus.h> +#include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/debugfs.h> @@ -15,6 +17,7 @@ #include <linux/module.h> #include <linux/of_graph.h> #include <linux/pm_runtime.h> +#include <linux/pwm.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> @@ -91,6 +94,13 @@ #define SN_ML_TX_MODE_REG 0x96 #define ML_TX_MAIN_LINK_OFF 0 #define ML_TX_NORMAL_MODE BIT(0) +#define SN_PWM_PRE_DIV_REG 0xA0 +#define SN_BACKLIGHT_SCALE_REG 0xA1 +#define BACKLIGHT_SCALE_MAX 0xFFFF +#define SN_BACKLIGHT_REG 0xA3 +#define SN_PWM_EN_INV_REG 0xA5 +#define SN_PWM_INV_MASK BIT(0) +#define SN_PWM_EN_MASK BIT(1) #define SN_AUX_CMD_STATUS_REG 0xF4 #define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3) #define AUX_IRQ_STATUS_AUX_SHORT BIT(5) @@ -113,11 +123,14 @@ #define SN_LINK_TRAINING_TRIES 10 +#define SN_PWM_GPIO_IDX 3 /* 4th GPIO */ + /** * struct ti_sn65dsi86 - Platform data for ti-sn65dsi86 driver. * @bridge_aux: AUX-bus sub device for MIPI-to-eDP bridge functionality. * @gpio_aux: AUX-bus sub device for GPIO controller functionality. * @aux_aux: AUX-bus sub device for eDP AUX channel functionality. + * @pwm_aux: AUX-bus sub device for PWM controller functionality. * * @dev: Pointer to the top level (i2c) device. * @regmap: Regmap for accessing i2c. @@ -145,11 +158,17 @@ * bitmap so we can do atomic ops on it without an extra * lock so concurrent users of our 4 GPIOs don't stomp on * each other's read-modify-write. + * + * @pchip: pwm_chip if the PWM is exposed. + * @pwm_enabled: Used to track if the PWM signal is currently enabled. + * @pwm_pin_busy: Track if GPIO4 is currently requested for GPIO or PWM. + * @pwm_refclk_freq: Cache for the reference clock input to the PWM. 
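+ *
+ * (Each of these is created with ti_sn65dsi86_add_aux_device() in probe, so
+ * the GPIO, PWM, AUX and bridge functions can probe independently and
+ * return -EPROBE_DEFER without blocking one another.)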
*/ struct ti_sn65dsi86 { struct auxiliary_device bridge_aux; struct auxiliary_device gpio_aux; struct auxiliary_device aux_aux; + struct auxiliary_device pwm_aux; struct device *dev; struct regmap *regmap; @@ -172,6 +191,12 @@ struct ti_sn65dsi86 { struct gpio_chip gchip; DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS); #endif +#if defined(CONFIG_PWM) + struct pwm_chip pchip; + bool pwm_enabled; + atomic_t pwm_pin_busy; +#endif + unsigned int pwm_refclk_freq; }; static const struct regmap_range ti_sn65dsi86_volatile_ranges[] = { @@ -188,13 +213,30 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = { .val_bits = 8, .volatile_table = &ti_sn_bridge_volatile_table, .cache_type = REGCACHE_NONE, + .max_register = 0xFF, }; +static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata, + unsigned int reg, u16 *val) +{ + u8 buf[2]; + int ret; + + ret = regmap_bulk_read(pdata->regmap, reg, buf, ARRAY_SIZE(buf)); + if (ret) + return ret; + + *val = buf[0] | (buf[1] << 8); + + return 0; +} + static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata, unsigned int reg, u16 val) { - regmap_write(pdata->regmap, reg, val & 0xFF); - regmap_write(pdata->regmap, reg + 1, val >> 8); + u8 buf[2] = { val & 0xff, val >> 8 }; + + regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf)); } static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata) @@ -253,6 +295,12 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK, REFCLK_FREQ(i)); + + /* + * The PWM refclk is based on the value written to SN_DPPLL_SRC_REG, + * regardless of its actual sourcing. + */ + pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i]; } static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata) @@ -655,58 +703,24 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge) return container_of(bridge, struct ti_sn65dsi86, bridge); } -static int ti_sn_bridge_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) +static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata) { - int ret, val; - struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + int val; struct mipi_dsi_host *host; struct mipi_dsi_device *dsi; + struct device *dev = pdata->dev; const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge", .channel = 0, .node = NULL, - }; - - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { - DRM_ERROR("Fix bridge driver to make connector optional!"); - return -EINVAL; - } - - pdata->aux.drm_dev = bridge->dev; - ret = drm_dp_aux_register(&pdata->aux); - if (ret < 0) { - drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret); - return ret; - } - - ret = ti_sn_bridge_connector_init(pdata); - if (ret < 0) - goto err_conn_init; + }; - /* - * TODO: ideally finding host resource and dsi dev registration needs - * to be done in bridge probe. But some existing DSI host drivers will - * wait for any of the drm_bridge/drm_panel to get added to the global - * bridge/panel list, before completing their probe. So if we do the - * dsi dev registration part in bridge probe, before populating in - * the global bridge list, then it will cause deadlock as dsi host probe - * will never complete, neither our bridge probe. So keeping it here - * will satisfy most of the existing host drivers. Once the host driver - * is fixed we can move the below code to bridge probe safely. 
- */ host = of_find_mipi_dsi_host_by_node(pdata->host_node); - if (!host) { - DRM_ERROR("failed to find dsi host\n"); - ret = -ENODEV; - goto err_dsi_host; - } + if (!host) + return -EPROBE_DEFER; - dsi = mipi_dsi_device_register_full(host, &info); - if (IS_ERR(dsi)) { - DRM_ERROR("failed to create dsi device\n"); - ret = PTR_ERR(dsi); - goto err_dsi_host; - } + dsi = devm_mipi_dsi_device_register_full(dev, host, &info); + if (IS_ERR(dsi)) + return PTR_ERR(dsi); /* TODO: setting to 4 MIPI lanes always for now */ dsi->lanes = 4; @@ -714,18 +728,38 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, dsi->mode_flags = MIPI_DSI_MODE_VIDEO; /* check if continuous dsi clock is required or not */ - pm_runtime_get_sync(pdata->dev); + pm_runtime_get_sync(dev); regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val); - pm_runtime_put_autosuspend(pdata->dev); + pm_runtime_put_autosuspend(dev); if (!(val & DPPLL_CLK_SRC_DSICLK)) dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS; - ret = mipi_dsi_attach(dsi); + pdata->dsi = dsi; + + return devm_mipi_dsi_attach(dev, dsi); +} + +static int ti_sn_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + int ret; + + if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { + DRM_ERROR("Fix bridge driver to make connector optional!"); + return -EINVAL; + } + + pdata->aux.drm_dev = bridge->dev; + ret = drm_dp_aux_register(&pdata->aux); if (ret < 0) { - DRM_ERROR("failed to attach dsi to host\n"); - goto err_dsi_attach; + drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret); + return ret; } - pdata->dsi = dsi; + + ret = ti_sn_bridge_connector_init(pdata); + if (ret < 0) + goto err_conn_init; /* We never want the next bridge to *also* create a connector: */ flags |= DRM_BRIDGE_ATTACH_NO_CONNECTOR; @@ -734,14 +768,10 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge, &pdata->bridge, flags); if (ret < 0) - goto err_dsi_detach; + goto err_dsi_host; return 0; -err_dsi_detach: - mipi_dsi_detach(dsi); -err_dsi_attach: - mipi_dsi_device_unregister(dsi); err_dsi_host: drm_connector_cleanup(&pdata->connector); err_conn_init: @@ -1227,7 +1257,17 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, drm_bridge_add(&pdata->bridge); + ret = ti_sn_attach_host(pdata); + if (ret) { + dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n"); + goto err_remove_bridge; + } + return 0; + +err_remove_bridge: + drm_bridge_remove(&pdata->bridge); + return ret; } static void ti_sn_bridge_remove(struct auxiliary_device *adev) @@ -1237,11 +1277,6 @@ static void ti_sn_bridge_remove(struct auxiliary_device *adev) if (!pdata) return; - if (pdata->dsi) { - mipi_dsi_detach(pdata->dsi); - mipi_dsi_device_unregister(pdata->dsi); - } - drm_bridge_remove(&pdata->bridge); of_node_put(pdata->host_node); @@ -1260,9 +1295,287 @@ static struct auxiliary_driver ti_sn_bridge_driver = { }; /* ----------------------------------------------------------------------------- - * GPIO Controller + * PWM Controller */ +#if defined(CONFIG_PWM) +static int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) +{ + return atomic_xchg(&pdata->pwm_pin_busy, 1) ? 
-EBUSY : 0; +} + +static void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) +{ + atomic_set(&pdata->pwm_pin_busy, 0); +} + +static struct ti_sn65dsi86 *pwm_chip_to_ti_sn_bridge(struct pwm_chip *chip) +{ + return container_of(chip, struct ti_sn65dsi86, pchip); +} + +static int ti_sn_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + + return ti_sn_pwm_pin_request(pdata); +} + +static void ti_sn_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + + ti_sn_pwm_pin_release(pdata); +} + +/* + * Limitations: + * - The PWM signal is not driven when the chip is powered down, or in its + * reset state and the driver does not implement the "suspend state" + * described in the documentation. In order to save power, state->enabled is + * interpreted as denoting if the signal is expected to be valid, and is used + * to determine if the chip needs to be kept powered. + * - Changing both period and duty_cycle is not done atomically, neither is the + * multi-byte register updates, so the output might briefly be undefined + * during update. + */ +static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + unsigned int pwm_en_inv; + unsigned int backlight; + unsigned int pre_div; + unsigned int scale; + u64 period_max; + u64 period; + int ret; + + if (!pdata->pwm_enabled) { + ret = pm_runtime_get_sync(pdata->dev); + if (ret < 0) { + pm_runtime_put_sync(pdata->dev); + return ret; + } + } + + if (state->enabled) { + if (!pdata->pwm_enabled) { + /* + * The chip might have been powered down while we + * didn't hold a PM runtime reference, so mux in the + * PWM function on the GPIO pin again. + */ + ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG, + SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX), + SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX)); + if (ret) { + dev_err(pdata->dev, "failed to mux in PWM function\n"); + goto out; + } + } + + /* + * Per the datasheet the PWM frequency is given by: + * + * REFCLK_FREQ + * PWM_FREQ = ----------------------------------- + * PWM_PRE_DIV * BACKLIGHT_SCALE + 1 + * + * However, after careful review the author is convinced that + * the documentation has lost some parenthesis around + * "BACKLIGHT_SCALE + 1". + * + * With the period T_pwm = 1/PWM_FREQ this can be written: + * + * T_pwm * REFCLK_FREQ = PWM_PRE_DIV * (BACKLIGHT_SCALE + 1) + * + * In order to keep BACKLIGHT_SCALE within its 16 bits, + * PWM_PRE_DIV must be: + * + * T_pwm * REFCLK_FREQ + * PWM_PRE_DIV >= ------------------------- + * BACKLIGHT_SCALE_MAX + 1 + * + * To simplify the search and to favour higher resolution of + * the duty cycle over accuracy of the period, the lowest + * possible PWM_PRE_DIV is used. Finally the scale is + * calculated as: + * + * T_pwm * REFCLK_FREQ + * BACKLIGHT_SCALE = ---------------------- - 1 + * PWM_PRE_DIV + * + * Here T_pwm is represented in seconds, so appropriate scaling + * to nanoseconds is necessary. 
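+	 *
+	 * Worked example (taking REFCLK_FREQ = 19.2 MHz, one of the entries
+	 * in ti_sn_bridge_refclk_lut, purely as an illustration): a requested
+	 * period of 1 ms gives T_pwm * REFCLK_FREQ = 19200, so
+	 * PWM_PRE_DIV = DIV_ROUND_UP(19200, 65536) = 1 and
+	 * BACKLIGHT_SCALE = 19200 / 1 - 1 = 19199. A 0.5 ms duty_cycle then
+	 * maps to BACKLIGHT = 9600, i.e. 50% duty at roughly 14 bits of
+	 * resolution.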
+ */ + + /* Minimum T_pwm is 1 / REFCLK_FREQ */ + if (state->period <= NSEC_PER_SEC / pdata->pwm_refclk_freq) { + ret = -EINVAL; + goto out; + } + + /* + * Maximum T_pwm is 255 * (65535 + 1) / REFCLK_FREQ + * Limit period to this to avoid overflows + */ + period_max = div_u64((u64)NSEC_PER_SEC * 255 * (65535 + 1), + pdata->pwm_refclk_freq); + period = min(state->period, period_max); + + pre_div = DIV64_U64_ROUND_UP(period * pdata->pwm_refclk_freq, + (u64)NSEC_PER_SEC * (BACKLIGHT_SCALE_MAX + 1)); + scale = div64_u64(period * pdata->pwm_refclk_freq, (u64)NSEC_PER_SEC * pre_div) - 1; + + /* + * The documentation has the duty ratio given as: + * + * duty BACKLIGHT + * ------- = --------------------- + * period BACKLIGHT_SCALE + 1 + * + * Solve for BACKLIGHT, substituting BACKLIGHT_SCALE according + * to definition above and adjusting for nanosecond + * representation of duty cycle gives us: + */ + backlight = div64_u64(state->duty_cycle * pdata->pwm_refclk_freq, + (u64)NSEC_PER_SEC * pre_div); + if (backlight > scale) + backlight = scale; + + ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div); + if (ret) { + dev_err(pdata->dev, "failed to update PWM_PRE_DIV\n"); + goto out; + } + + ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_SCALE_REG, scale); + ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_REG, backlight); + } + + pwm_en_inv = FIELD_PREP(SN_PWM_EN_MASK, state->enabled) | + FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED); + ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv); + if (ret) { + dev_err(pdata->dev, "failed to update PWM_EN/PWM_INV\n"); + goto out; + } + + pdata->pwm_enabled = state->enabled; +out: + + if (!pdata->pwm_enabled) + pm_runtime_put_sync(pdata->dev); + + return ret; +} + +static void ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); + unsigned int pwm_en_inv; + unsigned int pre_div; + u16 backlight; + u16 scale; + int ret; + + ret = regmap_read(pdata->regmap, SN_PWM_EN_INV_REG, &pwm_en_inv); + if (ret) + return; + + ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_SCALE_REG, &scale); + if (ret) + return; + + ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_REG, &backlight); + if (ret) + return; + + ret = regmap_read(pdata->regmap, SN_PWM_PRE_DIV_REG, &pre_div); + if (ret) + return; + + state->enabled = FIELD_GET(SN_PWM_EN_MASK, pwm_en_inv); + if (FIELD_GET(SN_PWM_INV_MASK, pwm_en_inv)) + state->polarity = PWM_POLARITY_INVERSED; + else + state->polarity = PWM_POLARITY_NORMAL; + + state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * (scale + 1), + pdata->pwm_refclk_freq); + state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * backlight, + pdata->pwm_refclk_freq); + + if (state->duty_cycle > state->period) + state->duty_cycle = state->period; +} +static const struct pwm_ops ti_sn_pwm_ops = { + .request = ti_sn_pwm_request, + .free = ti_sn_pwm_free, + .apply = ti_sn_pwm_apply, + .get_state = ti_sn_pwm_get_state, + .owner = THIS_MODULE, +}; + +static int ti_sn_pwm_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); + + pdata->pchip.dev = pdata->dev; + pdata->pchip.ops = &ti_sn_pwm_ops; + pdata->pchip.npwm = 1; + pdata->pchip.of_xlate = of_pwm_single_xlate; + pdata->pchip.of_pwm_n_cells = 1; + + return pwmchip_add(&pdata->pchip); +} + +static void ti_sn_pwm_remove(struct auxiliary_device *adev) +{ + struct 
ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); + + pwmchip_remove(&pdata->pchip); + + if (pdata->pwm_enabled) + pm_runtime_put_sync(pdata->dev); +} + +static const struct auxiliary_device_id ti_sn_pwm_id_table[] = { + { .name = "ti_sn65dsi86.pwm", }, + {}, +}; + +static struct auxiliary_driver ti_sn_pwm_driver = { + .name = "pwm", + .probe = ti_sn_pwm_probe, + .remove = ti_sn_pwm_remove, + .id_table = ti_sn_pwm_id_table, +}; + +static int __init ti_sn_pwm_register(void) +{ + return auxiliary_driver_register(&ti_sn_pwm_driver); +} + +static void ti_sn_pwm_unregister(void) +{ + auxiliary_driver_unregister(&ti_sn_pwm_driver); +} + +#else +static inline int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; } +static inline void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {} + +static inline int ti_sn_pwm_register(void) { return 0; } +static inline void ti_sn_pwm_unregister(void) {} +#endif + +/* ----------------------------------------------------------------------------- + * GPIO Controller + */ #if defined(CONFIG_OF_GPIO) static int tn_sn_bridge_of_xlate(struct gpio_chip *chip, @@ -1395,10 +1708,25 @@ static int ti_sn_bridge_gpio_direction_output(struct gpio_chip *chip, return ret; } +static int ti_sn_bridge_gpio_request(struct gpio_chip *chip, unsigned int offset) +{ + struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); + + if (offset == SN_PWM_GPIO_IDX) + return ti_sn_pwm_pin_request(pdata); + + return 0; +} + static void ti_sn_bridge_gpio_free(struct gpio_chip *chip, unsigned int offset) { + struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); + /* We won't keep pm_runtime if we're input, so switch there on free */ ti_sn_bridge_gpio_direction_input(chip, offset); + + if (offset == SN_PWM_GPIO_IDX) + ti_sn_pwm_pin_release(pdata); } static const char * const ti_sn_bridge_gpio_names[SN_NUM_GPIOS] = { @@ -1420,6 +1748,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev, pdata->gchip.owner = THIS_MODULE; pdata->gchip.of_xlate = tn_sn_bridge_of_xlate; pdata->gchip.of_gpio_n_cells = 2; + pdata->gchip.request = ti_sn_bridge_gpio_request; pdata->gchip.free = ti_sn_bridge_gpio_free; pdata->gchip.get_direction = ti_sn_bridge_gpio_get_direction; pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input; @@ -1546,10 +1875,9 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, * ordering. The bridge wants the panel to be there when it probes. * The panel wants its HPD GPIO (provided by sn65dsi86 on some boards) * when it probes. The panel and maybe backlight might want the DDC - * bus. Soon the PWM provided by the bridge chip will have the same - * problem. Having sub-devices allows the some sub devices to finish - * probing even if others return -EPROBE_DEFER and gets us around the - * problems. + * bus or the pwm_chip. Having sub-devices allows the some sub devices + * to finish probing even if others return -EPROBE_DEFER and gets us + * around the problems. */ if (IS_ENABLED(CONFIG_OF_GPIO)) { @@ -1558,6 +1886,12 @@ static int ti_sn65dsi86_probe(struct i2c_client *client, return ret; } + if (IS_ENABLED(CONFIG_PWM)) { + ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->pwm_aux, "pwm"); + if (ret) + return ret; + } + /* * NOTE: At the end of the AUX channel probe we'll add the aux device * for the bridge. 
This is because the bridge can't be used until the @@ -1601,10 +1935,14 @@ static int __init ti_sn65dsi86_init(void) if (ret) goto err_main_was_registered; - ret = auxiliary_driver_register(&ti_sn_aux_driver); + ret = ti_sn_pwm_register(); if (ret) goto err_gpio_was_registered; + ret = auxiliary_driver_register(&ti_sn_aux_driver); + if (ret) + goto err_pwm_was_registered; + ret = auxiliary_driver_register(&ti_sn_bridge_driver); if (ret) goto err_aux_was_registered; @@ -1613,6 +1951,8 @@ static int __init ti_sn65dsi86_init(void) err_aux_was_registered: auxiliary_driver_unregister(&ti_sn_aux_driver); +err_pwm_was_registered: + ti_sn_pwm_unregister(); err_gpio_was_registered: ti_sn_gpio_unregister(); err_main_was_registered: @@ -1626,6 +1966,7 @@ static void __exit ti_sn65dsi86_exit(void) { auxiliary_driver_unregister(&ti_sn_bridge_driver); auxiliary_driver_unregister(&ti_sn_aux_driver); + ti_sn_pwm_unregister(); ti_sn_gpio_unregister(); i2c_del_driver(&ti_sn65dsi86_driver); } diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index ff1416cd609a..21174efd91be 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -74,7 +74,7 @@ int drm_crtc_commit_wait(struct drm_crtc_commit *commit) ret = wait_for_completion_timeout(&commit->hw_done, timeout); if (!ret) { - DRM_ERROR("hw_done timed out\n"); + drm_err(commit->crtc->dev, "hw_done timed out\n"); return -ETIMEDOUT; } @@ -84,7 +84,7 @@ int drm_crtc_commit_wait(struct drm_crtc_commit *commit) */ ret = wait_for_completion_timeout(&commit->flip_done, timeout); if (!ret) { - DRM_ERROR("flip_done timed out\n"); + drm_err(commit->crtc->dev, "flip_done timed out\n"); return -ETIMEDOUT; } @@ -140,7 +140,7 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) state->dev = dev; - DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state); + drm_dbg_atomic(dev, "Allocated atomic state %p\n", state); return 0; fail: @@ -191,7 +191,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) struct drm_mode_config *config = &dev->mode_config; int i; - DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); + drm_dbg_atomic(dev, "Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i].ptr; @@ -301,7 +301,7 @@ void __drm_atomic_state_free(struct kref *ref) drm_atomic_state_clear(state); - DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); + drm_dbg_atomic(state->dev, "Freeing atomic state %p\n", state); if (config->funcs->atomic_state_free) { config->funcs->atomic_state_free(state); @@ -358,8 +358,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, state->crtcs[index].ptr = crtc; crtc_state->state = state; - DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", - crtc->base.id, crtc->name, crtc_state, state); + drm_dbg_atomic(state->dev, "Added [CRTC:%d:%s] %p state to %p\n", + crtc->base.id, crtc->name, crtc_state, state); return crtc_state; } @@ -379,8 +379,9 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, */ if (new_crtc_state->active && !new_crtc_state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] active without enabled\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -390,15 +391,17 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, */ if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 
WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] enabled without mode blob\n", + crtc->base.id, crtc->name); return -EINVAL; } if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] disabled with mode blob\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -414,8 +417,9 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, */ if (new_crtc_state->event && !new_crtc_state->active && !old_crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] requesting event but off\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -460,8 +464,9 @@ static int drm_atomic_connector_check(struct drm_connector *connector, return 0; if (writeback_job->fb && !state->crtc) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n", - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] framebuffer without CRTC\n", + connector->base.id, connector->name); return -EINVAL; } @@ -470,16 +475,18 @@ static int drm_atomic_connector_check(struct drm_connector *connector, state->crtc); if (writeback_job->fb && !crtc_state->active) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", - connector->base.id, connector->name, - state->crtc->base.id); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", + connector->base.id, connector->name, + state->crtc->base.id); return -EINVAL; } if (!writeback_job->fb) { if (writeback_job->out_fence) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", + connector->base.id, connector->name); return -EINVAL; } @@ -537,8 +544,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, state->planes[index].new_state = plane_state; plane_state->state = state; - DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", - plane->base.id, plane->name, plane_state, state); + drm_dbg_atomic(plane->dev, "Added [PLANE:%d:%s] %p state to %p\n", + plane->base.id, plane->name, plane_state, state); if (plane_state->crtc) { struct drm_crtc_state *crtc_state; @@ -594,12 +601,12 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, /* either *both* CRTC and FB must be set, or neither */ if (crtc && !fb) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n", + plane->base.id, plane->name); return -EINVAL; } else if (fb && !crtc) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n", + plane->base.id, plane->name); return -EINVAL; } @@ -609,9 +616,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { - DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", - 
crtc->base.id, crtc->name, - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, + "Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", + crtc->base.id, crtc->name, + plane->base.id, plane->name); return -EINVAL; } @@ -619,9 +627,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, ret = drm_plane_check_pixel_format(plane, fb->format->format, fb->modifier); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", - plane->base.id, plane->name, - &fb->format->format, fb->modifier); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", + plane->base.id, plane->name, + &fb->format->format, fb->modifier); return ret; } @@ -630,10 +639,11 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || new_plane_state->crtc_h > INT_MAX || new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", - plane->base.id, plane->name, - new_plane_state->crtc_w, new_plane_state->crtc_h, - new_plane_state->crtc_x, new_plane_state->crtc_y); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", + plane->base.id, plane->name, + new_plane_state->crtc_w, new_plane_state->crtc_h, + new_plane_state->crtc_x, new_plane_state->crtc_y); return -ERANGE; } @@ -645,18 +655,19 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, new_plane_state->src_x > fb_width - new_plane_state->src_w || new_plane_state->src_h > fb_height || new_plane_state->src_y > fb_height - new_plane_state->src_h) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", - plane->base.id, plane->name, - new_plane_state->src_w >> 16, - ((new_plane_state->src_w & 0xffff) * 15625) >> 10, - new_plane_state->src_h >> 16, - ((new_plane_state->src_h & 0xffff) * 15625) >> 10, - new_plane_state->src_x >> 16, - ((new_plane_state->src_x & 0xffff) * 15625) >> 10, - new_plane_state->src_y >> 16, - ((new_plane_state->src_y & 0xffff) * 15625) >> 10, - fb->width, fb->height); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", + plane->base.id, plane->name, + new_plane_state->src_w >> 16, + ((new_plane_state->src_w & 0xffff) * 15625) >> 10, + new_plane_state->src_h >> 16, + ((new_plane_state->src_h & 0xffff) * 15625) >> 10, + new_plane_state->src_x >> 16, + ((new_plane_state->src_x & 0xffff) * 15625) >> 10, + new_plane_state->src_y >> 16, + ((new_plane_state->src_y & 0xffff) * 15625) >> 10, + fb->width, fb->height); return -ENOSPC; } @@ -671,9 +682,10 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, clips->y1 < 0 || clips->x2 > fb_width || clips->y2 > fb_height) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", - plane->base.id, plane->name, clips->x1, - clips->y1, clips->x2, clips->y2); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", + plane->base.id, plane->name, clips->x1, + clips->y1, clips->x2, clips->y2); return -EINVAL; } clips++; @@ -681,8 +693,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, } if (plane_switching_crtc(old_plane_state, new_plane_state)) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", - plane->base.id, plane->name); + 
drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] switching CRTC directly\n", + plane->base.id, plane->name); return -EINVAL; } @@ -846,8 +859,9 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, state->num_private_objs = num_objs; - DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n", - obj, obj_state, state); + drm_dbg_atomic(state->dev, + "Added new private object %p state %p to %p\n", + obj, obj_state, state); return obj_state; } @@ -1027,7 +1041,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->connectors[index].ptr = connector; connector_state->state = state; - DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n", + drm_dbg_atomic(connector->dev, "Added [CONNECTOR:%d:%s] %p state to %p\n", connector->base.id, connector->name, connector_state, state); @@ -1160,8 +1174,9 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, if (!encoder) return 0; - DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n", - encoder->base.id, encoder->name, state); + drm_dbg_atomic(encoder->dev, + "Adding all bridges for [encoder:%d:%s] to %p\n", + encoder->base.id, encoder->name, state); drm_for_each_bridge_in_chain(encoder, bridge) { /* Skip bridges that don't implement the atomic state hooks. */ @@ -1213,8 +1228,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, if (ret) return ret; - DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n", - crtc->base.id, crtc->name, state); + drm_dbg_atomic(crtc->dev, + "Adding all current connectors for [CRTC:%d:%s] to %p\n", + crtc->base.id, crtc->name, state); /* * Changed connectors are already in @state, so only need to look @@ -1267,8 +1283,9 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state, WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); - DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n", - crtc->base.id, crtc->name, state); + drm_dbg_atomic(crtc->dev, + "Adding all current planes for [CRTC:%d:%s] to %p\n", + crtc->base.id, crtc->name, state); drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) { struct drm_plane_state *plane_state = @@ -1308,7 +1325,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) unsigned int affected_crtc = 0; int i, ret = 0; - DRM_DEBUG_ATOMIC("checking %p\n", state); + drm_dbg_atomic(dev, "checking %p\n", state); for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) requested_crtc |= drm_crtc_mask(crtc); @@ -1316,8 +1333,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { ret = drm_atomic_plane_check(old_plane_state, new_plane_state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", - plane->base.id, plane->name); + drm_dbg_atomic(dev, "[PLANE:%d:%s] atomic core check failed\n", + plane->base.id, plane->name); return ret; } } @@ -1325,8 +1342,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] atomic core check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -1334,8 +1351,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) for_each_new_connector_in_state(state, conn, conn_state, i) { ret = 
drm_atomic_connector_check(conn, conn_state); if (ret) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n", - conn->base.id, conn->name); + drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] atomic core check failed\n", + conn->base.id, conn->name); return ret; } } @@ -1344,8 +1361,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) ret = config->funcs->atomic_check(state->dev, state); if (ret) { - DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n", - state, ret); + drm_dbg_atomic(dev, "atomic driver check for %p failed: %d\n", + state, ret); return ret; } } @@ -1353,8 +1370,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (!state->allow_modeset) { for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] requires full modeset\n", + crtc->base.id, crtc->name); return -EINVAL; } } @@ -1374,8 +1391,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) * so compositors know what's going on. */ if (affected_crtc != requested_crtc) { - DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n", - requested_crtc, affected_crtc); + drm_dbg_atomic(dev, + "driver added CRTC to commit: requested 0x%x, affected 0x%0x\n", + requested_crtc, affected_crtc); WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n", requested_crtc, affected_crtc); } @@ -1407,7 +1425,7 @@ int drm_atomic_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_ATOMIC("committing %p\n", state); + drm_dbg_atomic(state->dev, "committing %p\n", state); return config->funcs->atomic_commit(state->dev, state, false); } @@ -1436,7 +1454,7 @@ int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state); + drm_dbg_atomic(state->dev, "committing %p nonblocking\n", state); return config->funcs->atomic_commit(state->dev, state, true); } @@ -1633,11 +1651,11 @@ void drm_atomic_print_new_state(const struct drm_atomic_state *state, int i; if (!p) { - DRM_ERROR("invalid drm printer\n"); + drm_err(state->dev, "invalid drm printer\n"); return; } - DRM_DEBUG_ATOMIC("checking %p\n", state); + drm_dbg_atomic(state->dev, "checking %p\n", state); for_each_new_plane_in_state(state, plane, plane_state, i) drm_atomic_plane_print_state(p, plane_state); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2c0c6ec92820..aef2fbd676e5 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -132,9 +132,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, if (new_encoder) { if (encoder_mask & drm_encoder_mask(new_encoder)) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n", - new_encoder->base.id, new_encoder->name, - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n", + new_encoder->base.id, new_encoder->name, + connector->base.id, connector->name); return -EINVAL; } @@ -169,11 +170,12 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, continue; if (!disable_conflicting_encoders) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n", - encoder->base.id, encoder->name, - connector->state->crtc->base.id, - 
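The bulk of the churn above, and in the drm_atomic_helper.c hunks that follow, is one mechanical conversion: the global DRM_DEBUG_ATOMIC()/DRM_ERROR() macros become the device-aware drm_dbg_atomic()/drm_err() helpers, which take a struct drm_device pointer and prefix each message with the owning device, so logs from multi-GPU systems can be told apart. A minimal sketch of the resulting pattern; my_crtc_check() is a hypothetical caller, not kernel code:

#include <drm/drm_crtc.h>
#include <drm/drm_print.h>

static int my_crtc_check(struct drm_crtc *crtc, bool enable)
{
	if (!enable) {
		/* drm_dbg_atomic() tags the message with crtc->dev. */
		drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] rejected\n",
			       crtc->base.id, crtc->name);
		return -EINVAL;
	}
	return 0;
}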
connector->state->crtc->name, - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n", + encoder->base.id, encoder->name, + connector->state->crtc->base.id, + connector->state->crtc->name, + connector->base.id, connector->name); ret = -EINVAL; goto out; } @@ -184,10 +186,11 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, goto out; } - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", - encoder->base.id, encoder->name, - new_conn_state->crtc->base.id, new_conn_state->crtc->name, - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", + encoder->base.id, encoder->name, + new_conn_state->crtc->base.id, new_conn_state->crtc->name, + connector->base.id, connector->name); crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); @@ -268,9 +271,10 @@ steal_encoder(struct drm_atomic_state *state, encoder_crtc = old_connector_state->crtc; - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", - encoder->base.id, encoder->name, - encoder_crtc->base.id, encoder_crtc->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n", + encoder->base.id, encoder->name, + encoder_crtc->base.id, encoder_crtc->name); set_best_encoder(state, new_connector_state, NULL); @@ -291,9 +295,8 @@ update_connector_routing(struct drm_atomic_state *state, struct drm_encoder *new_encoder; struct drm_crtc_state *crtc_state; - DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + drm_dbg_atomic(connector->dev, "Updating routing for [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); if (old_connector_state->crtc != new_connector_state->crtc) { if (old_connector_state->crtc) { @@ -308,9 +311,8 @@ update_connector_routing(struct drm_atomic_state *state, } if (!new_connector_state->crtc) { - DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + drm_dbg_atomic(connector->dev, "Disabling [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); set_best_encoder(state, new_connector_state, NULL); @@ -339,8 +341,9 @@ update_connector_routing(struct drm_atomic_state *state, */ if (!state->duplicated && drm_connector_is_unregistered(connector) && crtc_state->active) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n", - connector->base.id, connector->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] is not registered\n", + connector->base.id, connector->name); return -EINVAL; } @@ -354,31 +357,33 @@ update_connector_routing(struct drm_atomic_state *state, new_encoder = drm_connector_get_single_encoder(connector); if (!new_encoder) { - DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + drm_dbg_atomic(connector->dev, + "No suitable encoder found for [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); return -EINVAL; } if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n", - new_encoder->base.id, - new_encoder->name, - new_connector_state->crtc->base.id, - new_connector_state->crtc->name); + drm_dbg_atomic(connector->dev, + "[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n", + new_encoder->base.id, + new_encoder->name, + 
new_connector_state->crtc->base.id, + new_connector_state->crtc->name); return -EINVAL; } if (new_encoder == new_connector_state->best_encoder) { set_best_encoder(state, new_connector_state, new_encoder); - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - new_connector_state->crtc->base.id, - new_connector_state->crtc->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + new_connector_state->crtc->base.id, + new_connector_state->crtc->name); return 0; } @@ -389,13 +394,14 @@ update_connector_routing(struct drm_atomic_state *state, crtc_state->connectors_changed = true; - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - new_connector_state->crtc->base.id, - new_connector_state->crtc->name); + drm_dbg_atomic(connector->dev, + "[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + new_connector_state->crtc->base.id, + new_connector_state->crtc->name); return 0; } @@ -443,7 +449,7 @@ mode_fixup(struct drm_atomic_state *state) new_crtc_state, new_conn_state); if (ret) { - DRM_DEBUG_ATOMIC("Bridge atomic check failed\n"); + drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n"); return ret; } @@ -451,16 +457,18 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->atomic_check(encoder, new_crtc_state, new_conn_state); if (ret) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] check failed\n", + encoder->base.id, encoder->name); return ret; } } else if (funcs && funcs->mode_fixup) { ret = funcs->mode_fixup(encoder, &new_crtc_state->mode, &new_crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] fixup failed\n", + encoder->base.id, encoder->name); return -EINVAL; } } @@ -483,8 +491,8 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->mode_fixup(crtc, &new_crtc_state->mode, &new_crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] fixup failed\n", + crtc->base.id, crtc->name); return -EINVAL; } } @@ -502,8 +510,9 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector, ret = drm_encoder_mode_valid(encoder, mode); if (ret != MODE_OK) { - DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(encoder->dev, + "[ENCODER:%d:%s] mode_valid() failed\n", + encoder->base.id, encoder->name); return ret; } @@ -511,14 +520,14 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector, ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info, mode); if (ret != MODE_OK) { - DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n"); + drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n"); return ret; } ret = drm_crtc_mode_valid(crtc, mode); if (ret != MODE_OK) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(encoder->dev, "[CRTC:%d:%s] 
mode_valid() failed\n", + crtc->base.id, crtc->name); return ret; } @@ -619,14 +628,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] mode changed\n", + crtc->base.id, crtc->name); new_crtc_state->mode_changed = true; } if (old_crtc_state->enable != new_crtc_state->enable) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] enable changed\n", + crtc->base.id, crtc->name); /* * For clarity this assignment is done here, but @@ -641,14 +650,14 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, } if (old_crtc_state->active != new_crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] active changed\n", + crtc->base.id, crtc->name); new_crtc_state->active_changed = true; } if (new_crtc_state->enable != has_connectors) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "[CRTC:%d:%s] enabled/connectors mismatch\n", + crtc->base.id, crtc->name); return -EINVAL; } @@ -708,10 +717,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", - crtc->base.id, crtc->name, - new_crtc_state->enable ? 'y' : 'n', - new_crtc_state->active ? 'y' : 'n'); + drm_dbg_atomic(dev, + "[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n", + crtc->base.id, crtc->name, + new_crtc_state->enable ? 'y' : 'n', + new_crtc_state->active ? 
'y' : 'n'); ret = drm_atomic_add_affected_connectors(state, crtc); if (ret != 0) @@ -818,7 +828,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, } if (!crtc_state->enable && !can_update_disabled) { - DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n"); + drm_dbg_kms(plane_state->crtc->dev, + "Cannot update plane of a disabled CRTC.\n"); return -EINVAL; } @@ -828,7 +839,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (hscale < 0 || vscale < 0) { - DRM_DEBUG_KMS("Invalid scaling of plane\n"); + drm_dbg_kms(plane_state->crtc->dev, + "Invalid scaling of plane\n"); drm_rect_debug_print("src: ", &plane_state->src, true); drm_rect_debug_print("dst: ", &plane_state->dst, false); return -ERANGE; @@ -852,7 +864,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, return 0; if (!can_position && !drm_rect_equals(dst, &clip)) { - DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); + drm_dbg_kms(plane_state->crtc->dev, + "Plane must cover entire CRTC\n"); drm_rect_debug_print("dst: ", dst, false); drm_rect_debug_print("clip: ", &clip, false); return -EINVAL; @@ -904,8 +917,9 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(plane, state); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] atomic driver check failed\n", + plane->base.id, plane->name); return ret; } } @@ -920,8 +934,9 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(crtc, state); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] atomic driver check failed\n", + crtc->base.id, crtc->name); return ret; } } @@ -1049,8 +1064,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = encoder->helper_private; - DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(dev, "disabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal @@ -1087,8 +1102,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; - DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "disabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); /* Right function depends upon target state. 
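drm_atomic_helper_check_plane_state() above carries the shared clipping, scaling and positioning checks; drivers are expected to call it from their plane ->atomic_check hook rather than open-coding them. A hedged sketch of such a hook, where my_plane_atomic_check() and the 16.16 fixed-point scale limits are illustrative:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int my_plane_atomic_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state, new_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* 16.16 fixed point: forbid upscaling, allow up to 2x downscale. */
	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   0x20000, false, false);
}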
*/ @@ -1229,8 +1244,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; if (new_crtc_state->enable && funcs->mode_set_nofb) { - DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "modeset on [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); funcs->mode_set_nofb(crtc); } @@ -1254,8 +1269,8 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) if (!new_crtc_state->mode_changed) continue; - DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal @@ -1357,8 +1372,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, funcs = crtc->helper_private; if (new_crtc_state->enable) { - DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(dev, "enabling [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); if (funcs->atomic_enable) funcs->atomic_enable(crtc, old_state); else if (funcs->commit) @@ -1381,8 +1396,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, encoder = new_conn_state->best_encoder; funcs = encoder->helper_private; - DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + drm_dbg_atomic(dev, "enabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal @@ -1551,8 +1566,8 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev, ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", - crtc->base.id, crtc->name); + drm_err(dev, "[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); } if (old_state->fake_commit) @@ -1739,8 +1754,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev, */ if (old_plane_state->commit && !try_wait_for_completion(&old_plane_state->commit->hw_done)) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n", - plane->base.id, plane->name); + drm_dbg_atomic(dev, + "[PLANE:%d:%s] inflight previous commit preventing async commit\n", + plane->base.id, plane->name); return -EBUSY; } @@ -1962,8 +1978,9 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock) */ if (!completed && nonblock) { spin_unlock(&crtc->commit_lock); - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] busy with a previous commit\n", + crtc->base.id, crtc->name); return -EBUSY; } @@ -1985,8 +2002,8 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock) ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, 10*HZ); if (ret == 0) - DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n", - crtc->base.id, crtc->name); + drm_err(crtc->dev, "[CRTC:%d:%s] cleanup_done timed out\n", + crtc->base.id, crtc->name); drm_crtc_commit_put(stall_commit); @@ -2150,8 +2167,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, */ if (nonblock && old_conn_state->commit && !try_wait_for_completion(&old_conn_state->commit->flip_done)) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n", - conn->base.id, conn->name); + drm_dbg_atomic(conn->dev, + "[CONNECTOR:%d:%s] busy with a previous commit\n", + conn->base.id, conn->name); return -EBUSY; } @@ -2171,8 
+2189,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, */ if (nonblock && old_plane_state->commit && !try_wait_for_completion(&old_plane_state->commit->flip_done)) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n", - plane->base.id, plane->name); + drm_dbg_atomic(plane->dev, + "[PLANE:%d:%s] busy with a previous commit\n", + plane->base.id, plane->name); return -EBUSY; } @@ -2218,22 +2237,25 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state) for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { ret = drm_crtc_commit_wait(old_crtc_state->commit); if (ret) - DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n", - crtc->base.id, crtc->name); + drm_err(crtc->dev, + "[CRTC:%d:%s] commit wait timed out\n", + crtc->base.id, crtc->name); } for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { ret = drm_crtc_commit_wait(old_conn_state->commit); if (ret) - DRM_ERROR("[CONNECTOR:%d:%s] commit wait timed out\n", - conn->base.id, conn->name); + drm_err(conn->dev, + "[CONNECTOR:%d:%s] commit wait timed out\n", + conn->base.id, conn->name); } for_each_old_plane_in_state(old_state, plane, old_plane_state, i) { ret = drm_crtc_commit_wait(old_plane_state->commit); if (ret) - DRM_ERROR("[PLANE:%d:%s] commit wait timed out\n", - plane->base.id, plane->name); + drm_err(plane->dev, + "[PLANE:%d:%s] commit wait timed out\n", + plane->base.id, plane->name); } } EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); @@ -3120,7 +3142,9 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) ret = drm_atomic_helper_disable_all(dev, &ctx); if (ret) - DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); + drm_err(dev, + "Disabling all crtc's during unload failed with %i\n", + ret); DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); } @@ -3380,8 +3404,9 @@ static int page_flip_common(struct drm_atomic_state *state, /* Make sure we don't accidentally do a full modeset. 
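The -EBUSY returns above propagate to userspace when a nonblocking commit races a still-pending commit on the same plane, CRTC or connector. Compositors typically avoid the race by serializing on the page-flip event rather than retrying blindly. A hedged userspace sketch against libdrm; commit_and_wait() is illustrative:

#include <poll.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void page_flip_handler(int fd, unsigned int seq, unsigned int sec,
			      unsigned int usec, void *data)
{
	*(int *)data = 0;	/* flip completed */
}

int commit_and_wait(int fd, drmModeAtomicReq *req)
{
	int pending = 1;
	drmEventContext ev = {
		.version = 2,
		.page_flip_handler = page_flip_handler,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	int ret = drmModeAtomicCommit(fd, req,
				      DRM_MODE_ATOMIC_NONBLOCK |
				      DRM_MODE_PAGE_FLIP_EVENT, &pending);
	if (ret)
		return ret;	/* e.g. -EBUSY: previous commit in flight */

	/* Block on the flip event before issuing the next commit. */
	while (pending && poll(&pfd, 1, -1) >= 0)
		drmHandleEvent(fd, &ev);
	return 0;
}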
*/ state->allow_modeset = false; if (!crtc_state->active) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n", - crtc->base.id, crtc->name); + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] disabled, rejecting legacy flip\n", + crtc->base.id, crtc->name); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 909f31833181..9781722519c3 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -773,7 +773,7 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, state->scaling_mode = val; } else if (property == config->content_protection_property) { if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { - DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); + drm_dbg_kms(dev, "only drivers can set CP Enabled\n"); return -EINVAL; } state->content_protection = val; @@ -797,6 +797,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, fence_ptr); } else if (property == connector->max_bpc_property) { state->max_requested_bpc = val; + } else if (property == connector->privacy_screen_sw_state_property) { + state->privacy_screen_sw_state = val; } else if (connector->funcs->atomic_set_property) { return connector->funcs->atomic_set_property(connector, state, property, val); @@ -874,6 +876,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = 0; } else if (property == connector->max_bpc_property) { *val = state->max_requested_bpc; + } else if (property == connector->privacy_screen_sw_state_property) { + *val = state->privacy_screen_sw_state; } else if (connector->funcs->atomic_get_property) { return connector->funcs->atomic_get_property(connector, state, property, val); diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c index 60a6b21474b1..6e433d465f41 100644 --- a/drivers/gpu/drm/drm_auth.c +++ b/drivers/gpu/drm/drm_auth.c @@ -106,7 +106,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) auth->magic = file_priv->magic; mutex_unlock(&dev->master_mutex); - DRM_DEBUG("%u\n", auth->magic); + drm_dbg_core(dev, "%u\n", auth->magic); return ret < 0 ? 
ret : 0; } @@ -117,7 +117,7 @@ int drm_authmagic(struct drm_device *dev, void *data, struct drm_auth *auth = data; struct drm_file *file; - DRM_DEBUG("%u\n", auth->magic); + drm_dbg_core(dev, "%u\n", auth->magic); mutex_lock(&dev->master_mutex); file = idr_find(&file_priv->master->magic_map, auth->magic); @@ -274,7 +274,9 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, } if (file_priv->master->lessor != NULL) { - DRM_DEBUG_LEASE("Attempt to set lessee %d as master\n", file_priv->master->lessee_id); + drm_dbg_lease(dev, + "Attempt to set lessee %d as master\n", + file_priv->master->lessee_id); ret = -EINVAL; goto out_unlock; } @@ -315,7 +317,9 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, } if (file_priv->master->lessor != NULL) { - DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id); + drm_dbg_lease(dev, + "Attempt to drop lessee %d as master\n", + file_priv->master->lessee_id); ret = -EINVAL; goto out_unlock; } diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 30cc59fe6ef7..f19d9acbe959 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -31,7 +31,7 @@ #include <linux/dma-buf-map.h> #include <linux/export.h> #include <linux/highmem.h> -#include <linux/mem_encrypt.h> +#include <linux/cc_platform.h> #include <xen/xen.h> #include <drm/drm_cache.h> @@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits) * Enforce dma_alloc_coherent when memory encryption is active as well * for the same reasons as for Xen paravirtual hosts. */ - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return true; for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 52e20c68813b..a50c82bc2b2f 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -28,6 +28,7 @@ #include <drm/drm_print.h> #include <drm/drm_drv.h> #include <drm/drm_file.h> +#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_sysfs.h> #include <linux/uaccess.h> @@ -462,6 +463,11 @@ void drm_connector_cleanup(struct drm_connector *connector) DRM_CONNECTOR_REGISTERED)) drm_connector_unregister(connector); + if (connector->privacy_screen) { + drm_privacy_screen_put(connector->privacy_screen); + connector->privacy_screen = NULL; + } + if (connector->tile_group) { drm_mode_put_tile_group(dev, connector->tile_group); connector->tile_group = NULL; @@ -541,7 +547,11 @@ int drm_connector_register(struct drm_connector *connector) connector->registration_state = DRM_CONNECTOR_REGISTERED; /* Let userspace know we have a new connector */ - drm_sysfs_hotplug_event(connector->dev); + drm_sysfs_connector_hotplug_event(connector); + + if (connector->privacy_screen) + drm_privacy_screen_register_notifier(connector->privacy_screen, + &connector->privacy_screen_notifier); mutex_lock(&connector_list_lock); list_add_tail(&connector->global_connector_list_entry, &connector_list); @@ -578,6 +588,11 @@ void drm_connector_unregister(struct drm_connector *connector) list_del_init(&connector->global_connector_list_entry); mutex_unlock(&connector_list_lock); + if (connector->privacy_screen) + drm_privacy_screen_unregister_notifier( + connector->privacy_screen, + &connector->privacy_screen_notifier); + if (connector->funcs->early_unregister) connector->funcs->early_unregister(connector); @@ -1271,6 +1286,46 @@ static const struct drm_prop_enum_list dp_colorspaces[] = { * For DVI-I and TVout there is also a 
matching property "select subconnector" * allowing to switch between signal types. * DP subconnector corresponds to a downstream port. + * + * privacy-screen sw-state, privacy-screen hw-state: + * These 2 optional properties can be used to query the state of the + * electronic privacy screen that is available on some displays; and in + * some cases also control the state. If a driver implements these + * properties then both properties must be present. + * + * "privacy-screen hw-state" is read-only and reflects the actual state + * of the privacy-screen, possible values: "Enabled", "Disabled", + * "Enabled-locked", "Disabled-locked". The locked states indicate + * that the state cannot be changed through the DRM API. E.g. there + * might be devices where the firmware-setup options, or a hardware + * slider-switch, offer always on / off modes. + * + * "privacy-screen sw-state" can be set to change the privacy-screen state + * when not locked. In this case the driver must update the hw-state + * property to reflect the new state on completion of the commit of the + * sw-state property. Setting the sw-state property when the hw-state is + * locked must be interpreted by the driver as a request to change the + * state to the set state when the hw-state becomes unlocked. E.g. if + * "privacy-screen hw-state" is "Enabled-locked" and the sw-state + * gets set to "Disabled" followed by the user unlocking the state by + * changing the slider-switch position, then the driver must set the + * state to "Disabled" upon receiving the unlock event. + * + * In some cases the privacy-screen's actual state might change outside of + * control of the DRM code. E.g. there might be a firmware handled hotkey + * which toggles the actual state, or the actual state might be changed + * through another userspace API such as writing /proc/acpi/ibm/lcdshadow. + * In this case the driver must update both the hw-state and the sw-state + * to reflect the new value, overwriting any pending state requests in the + * sw-state. Any pending sw-state requests are thus discarded. + * + * Note that the ability for the state to change outside of control of + * the DRM master process means that userspace must not cache the value + * of the sw-state. Caching the sw-state value and including it in later + * atomic commits may lead to overriding a state change done through e.g. + * a firmware handled hotkey. Therefore userspace must not include the + * privacy-screen sw-state in an atomic commit unless it wants to change + * its value. */ int drm_connector_create_standard_properties(struct drm_device *dev) @@ -2365,6 +2420,154 @@ int drm_connector_set_panel_orientation_with_quirk( } EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk); +static const struct drm_prop_enum_list privacy_screen_enum[] = { + { PRIVACY_SCREEN_DISABLED, "Disabled" }, + { PRIVACY_SCREEN_ENABLED, "Enabled" }, + { PRIVACY_SCREEN_DISABLED_LOCKED, "Disabled-locked" }, + { PRIVACY_SCREEN_ENABLED_LOCKED, "Enabled-locked" }, +}; + +/** + * drm_connector_create_privacy_screen_properties - create the drm connector's + * privacy-screen properties. + * @connector: connector for which to create the privacy-screen properties + * + * This function creates the "privacy-screen sw-state" and "privacy-screen + * hw-state" properties for the connector. They are not attached.
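Concretely, the contract documented above means userspace writes only "privacy-screen sw-state", treats "privacy-screen hw-state" as read-only feedback, and never re-commits a cached sw-state value. A hedged libdrm sketch of setting the property by name; set_privacy_screen() is illustrative, the property names are the stable part:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int set_privacy_screen(int fd, uint32_t connector_id, uint64_t val)
{
	drmModeObjectProperties *props;
	int ret = -1;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -1;

	for (uint32_t i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		/* Match on the property name, then set it on the connector. */
		if (prop && !strcmp(prop->name, "privacy-screen sw-state"))
			ret = drmModeObjectSetProperty(fd, connector_id,
						       DRM_MODE_OBJECT_CONNECTOR,
						       prop->prop_id, val);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return ret;
}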
+ */ +void +drm_connector_create_privacy_screen_properties(struct drm_connector *connector) +{ + if (connector->privacy_screen_sw_state_property) + return; + + /* Note sw-state only supports the first 2 values of the enum */ + connector->privacy_screen_sw_state_property = + drm_property_create_enum(connector->dev, DRM_MODE_PROP_ENUM, + "privacy-screen sw-state", + privacy_screen_enum, 2); + + connector->privacy_screen_hw_state_property = + drm_property_create_enum(connector->dev, + DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ENUM, + "privacy-screen hw-state", + privacy_screen_enum, + ARRAY_SIZE(privacy_screen_enum)); +} +EXPORT_SYMBOL(drm_connector_create_privacy_screen_properties); + +/** + * drm_connector_attach_privacy_screen_properties - attach the drm connector's + * privacy-screen properties. + * @connector: connector on which to attach the privacy-screen properties + * + * This function attaches the "privacy-screen sw-state" and "privacy-screen + * hw-state" properties to the connector. The initial state of both is set + * to "Disabled". + */ +void +drm_connector_attach_privacy_screen_properties(struct drm_connector *connector) +{ + if (!connector->privacy_screen_sw_state_property) + return; + + drm_object_attach_property(&connector->base, + connector->privacy_screen_sw_state_property, + PRIVACY_SCREEN_DISABLED); + + drm_object_attach_property(&connector->base, + connector->privacy_screen_hw_state_property, + PRIVACY_SCREEN_DISABLED); +} +EXPORT_SYMBOL(drm_connector_attach_privacy_screen_properties); + +static void drm_connector_update_privacy_screen_properties( + struct drm_connector *connector, bool set_sw_state) +{ + enum drm_privacy_screen_status sw_state, hw_state; + + drm_privacy_screen_get_state(connector->privacy_screen, + &sw_state, &hw_state); + + if (set_sw_state) + connector->state->privacy_screen_sw_state = sw_state; + drm_object_property_set_value(&connector->base, + connector->privacy_screen_hw_state_property, hw_state); +} + +static int drm_connector_privacy_screen_notifier( + struct notifier_block *nb, unsigned long action, void *data) +{ + struct drm_connector *connector = + container_of(nb, struct drm_connector, privacy_screen_notifier); + struct drm_device *dev = connector->dev; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + drm_connector_update_privacy_screen_properties(connector, true); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + drm_sysfs_connector_status_event(connector, + connector->privacy_screen_sw_state_property); + drm_sysfs_connector_status_event(connector, + connector->privacy_screen_hw_state_property); + + return NOTIFY_DONE; +} + +/** + * drm_connector_attach_privacy_screen_provider - attach a privacy-screen to + * the connector + * @connector: connector to attach the privacy-screen to + * @priv: drm_privacy_screen to attach + * + * Create and attach the standard privacy-screen properties and register + * a generic notifier for generating sysfs-connector-status-events + * on external changes to the privacy-screen status. + * This function takes ownership of the passed in drm_privacy_screen and will + * call drm_privacy_screen_put() on it when the connector is destroyed.
+ */ +void drm_connector_attach_privacy_screen_provider( + struct drm_connector *connector, struct drm_privacy_screen *priv) +{ + connector->privacy_screen = priv; + connector->privacy_screen_notifier.notifier_call = + drm_connector_privacy_screen_notifier; + + drm_connector_create_privacy_screen_properties(connector); + drm_connector_update_privacy_screen_properties(connector, true); + drm_connector_attach_privacy_screen_properties(connector); +} +EXPORT_SYMBOL(drm_connector_attach_privacy_screen_provider); + +/** + * drm_connector_update_privacy_screen - update connector's privacy-screen sw-state + * @connector_state: connector-state to update the privacy-screen for + * + * This function calls drm_privacy_screen_set_sw_state() on the connector's + * privacy-screen. + * + * If the connector has no privacy-screen, then this is a no-op. + */ +void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state) +{ + struct drm_connector *connector = connector_state->connector; + int ret; + + if (!connector->privacy_screen) + return; + + ret = drm_privacy_screen_set_sw_state(connector->privacy_screen, + connector_state->privacy_screen_sw_state); + if (ret) { + drm_err(connector->dev, "Error updating privacy-screen sw_state\n"); + return; + } + + /* The hw_state property value may have changed, update it. */ + drm_connector_update_privacy_screen_properties(connector, false); +} +EXPORT_SYMBOL(drm_connector_update_privacy_screen); + int drm_connector_set_obj_prop(struct drm_mode_object *obj, struct drm_property *property, uint64_t value) diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 6d0f2c447f3b..23f9073bc473 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -130,6 +130,20 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI } EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); +/* DP 2.0 128b/132b */ +u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); + int s = ((lane & 1) ? 
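Putting the pieces above together: a driver that knows its panel has a platform privacy screen looks it up at probe time, hands it to the connector, and lets its commit path propagate the sw-state. A hedged sketch under those assumptions; the my_*() functions are illustrative, the drm_privacy_screen_*() and drm_connector_*() calls are the ones added by this series:

#include <drm/drm_connector.h>
#include <drm/drm_privacy_screen_consumer.h>

static int my_connector_init(struct drm_device *drm,
			     struct drm_connector *connector)
{
	struct drm_privacy_screen *ps;

	ps = drm_privacy_screen_get(drm->dev, NULL);
	if (IS_ERR(ps))
		/* -ENODEV simply means this panel has no privacy screen. */
		return PTR_ERR(ps) == -ENODEV ? 0 : PTR_ERR(ps);

	/* The connector now owns the reference and both properties. */
	drm_connector_attach_privacy_screen_provider(connector, ps);
	return 0;
}

static void my_commit_connector(const struct drm_connector_state *conn_state)
{
	/* No-op when no privacy screen is attached. */
	drm_connector_update_privacy_screen(conn_state);
}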
+ DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT : + DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT); + u8 l = dp_link_status(link_status, i); + + return (l >> s) & 0xf; +} +EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset); + u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE], unsigned int lane) { @@ -140,38 +154,155 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ } EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor); -void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, - const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +static int __8b10b_clock_recovery_delay_us(const struct drm_dp_aux *aux, u8 rd_interval) { - unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_TRAINING_AUX_RD_MASK; - if (rd_interval > 4) - drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n", + drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n", aux->name, rd_interval); - if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) - rd_interval = 100; - else - rd_interval *= 4 * USEC_PER_MSEC; + if (rd_interval == 0) + return 100; - usleep_range(rd_interval, rd_interval * 2); + return rd_interval * 4 * USEC_PER_MSEC; } -EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); -static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, - unsigned long rd_interval) +static int __8b10b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval) { if (rd_interval > 4) - drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n", + drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n", aux->name, rd_interval); if (rd_interval == 0) - rd_interval = 400; + return 400; + + return rd_interval * 4 * USEC_PER_MSEC; +} + +static int __128b132b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval) +{ + switch (rd_interval) { + default: + drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x\n", + aux->name, rd_interval); + fallthrough; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US: + return 400; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS: + return 4000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS: + return 8000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS: + return 12000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS: + return 16000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS: + return 32000; + case DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS: + return 64000; + } +} + +/* + * The link training delays are different for: + * + * - Clock recovery vs. channel equalization + * - DPRX vs. LTTPR + * - 128b/132b vs. 8b/10b + * - DPCD rev 1.3 vs. later + * + * Get the correct delay in us, reading DPCD if necessary. 
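The new drm_dp_get_adjust_tx_ffe_preset() above gives 128b/132b link training the same per-lane adjust-request parsing that 8b/10b already has for voltage swing and pre-emphasis. A hedged sketch of the call-site shape; read_ffe_presets() is illustrative and the surrounding training loop is omitted:

#include <drm/drm_dp_helper.h>

static int read_ffe_presets(struct drm_dp_aux *aux, u8 presets[4],
			    int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int lane;

	if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
		return -EIO;

	/* Sink-requested TX FFE preset for each active lane. */
	for (lane = 0; lane < lane_count; lane++)
		presets[lane] = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
	return 0;
}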
+ */ +static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr, bool cr) +{ + int (*parse)(const struct drm_dp_aux *aux, u8 rd_interval); + unsigned int offset; + u8 rd_interval, mask; + + if (dp_phy == DP_PHY_DPRX) { + if (uhbr) { + if (cr) + return 100; + + offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL; + mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK; + parse = __128b132b_channel_eq_delay_us; + } else { + if (cr && dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + return 100; + + offset = DP_TRAINING_AUX_RD_INTERVAL; + mask = DP_TRAINING_AUX_RD_MASK; + if (cr) + parse = __8b10b_clock_recovery_delay_us; + else + parse = __8b10b_channel_eq_delay_us; + } + } else { + if (uhbr) { + offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy); + mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK; + parse = __128b132b_channel_eq_delay_us; + } else { + if (cr) + return 100; + + offset = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy); + mask = DP_TRAINING_AUX_RD_MASK; + parse = __8b10b_channel_eq_delay_us; + } + } + + if (offset < DP_RECEIVER_CAP_SIZE) { + rd_interval = dpcd[offset]; + } else { + if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) { + drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n", + aux->name); + /* arbitrary default delay */ + return 400; + } + } + + return parse(aux, rd_interval & mask); +} + +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr) +{ + return __read_delay(aux, dpcd, dp_phy, uhbr, true); +} +EXPORT_SYMBOL(drm_dp_read_clock_recovery_delay); + +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE], + enum drm_dp_phy dp_phy, bool uhbr) +{ + return __read_delay(aux, dpcd, dp_phy, uhbr, false); +} +EXPORT_SYMBOL(drm_dp_read_channel_eq_delay); + +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux, + const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + u8 rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + int delay_us; + + if (dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + delay_us = 100; else - rd_interval *= 4 * USEC_PER_MSEC; + delay_us = __8b10b_clock_recovery_delay_us(aux, rd_interval); + + usleep_range(delay_us, delay_us * 2); +} +EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); + +static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, + u8 rd_interval) +{ + int delay_us = __8b10b_channel_eq_delay_us(aux, rd_interval); - usleep_range(rd_interval, rd_interval * 2); + usleep_range(delay_us, delay_us * 2); } void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, @@ -207,15 +338,33 @@ EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay); u8 drm_dp_link_rate_to_bw_code(int link_rate) { - /* Spec says link_bw = link_rate / 0.27Gbps */ - return link_rate / 27000; + switch (link_rate) { + case 1000000: + return DP_LINK_BW_10; + case 1350000: + return DP_LINK_BW_13_5; + case 2000000: + return DP_LINK_BW_20; + default: + /* Spec says link_bw = link_rate / 0.27Gbps */ + return link_rate / 27000; + } } EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code); int drm_dp_bw_code_to_link_rate(u8 link_bw) { - /* Spec says link_rate = link_bw * 0.27Gbps */ - return link_bw * 27000; + switch (link_bw) { + case DP_LINK_BW_10: + return 1000000; + case DP_LINK_BW_13_5: + return 1350000; + case DP_LINK_BW_20: + return 2000000; + default: + /* Spec says link_rate = link_bw * 0.27Gbps */ + return link_bw * 27000; + } } 
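The refactor above funnels every clock-recovery and channel-equalization delay through one table-driven helper, so callers no longer special-case DPCD revision, LTTPRs, or 128b/132b. A hedged sketch of one training wait; whether the link is UHBR would come from the negotiated rate, e.g. the DP_LINK_BW_10/13_5/20 codes handled nearby:

#include <linux/delay.h>
#include <drm/drm_dp_helper.h>

/* One channel-eq wait, valid for the DPRX or an LTTPR phy and for
 * either channel coding; is_uhbr reflects the negotiated link rate.
 */
static void wait_channel_eq(struct drm_dp_aux *aux,
			    const u8 dpcd[DP_RECEIVER_CAP_SIZE],
			    enum drm_dp_phy dp_phy, bool is_uhbr)
{
	int delay_us = drm_dp_read_channel_eq_delay(aux, dpcd, dp_phy, is_uhbr);

	usleep_range(delay_us, delay_us * 2);
}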
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate); @@ -590,7 +739,7 @@ static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux, u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - u8 dpcd_ext[6]; + u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; int ret; /* @@ -3141,6 +3290,10 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac int ret; u8 buf[2] = { 0 }; + /* The panel uses the PWM for controlling brightness levels */ + if (!bl->aux_set) + return 0; + if (bl->lsb_reg_used) { buf[0] = (level & 0xff00) >> 8; buf[1] = (level & 0x00ff); @@ -3167,7 +3320,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli int ret; u8 buf; - /* The panel uses something other then DPCD for enabling its backlight */ + /* This panel uses the EDP_BL_PWR GPIO for enablement */ if (!bl->aux_enable) return 0; @@ -3202,11 +3355,11 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli * restoring any important backlight state such as the given backlight level, the brightness byte * count, backlight frequency, etc. * - * Note that certain panels, while supporting brightness level controls over DPCD, may not support - * having their backlights enabled via the standard %DP_EDP_DISPLAY_CONTROL_REGISTER. On such panels - * &drm_edp_backlight_info.aux_enable will be set to %false, this function will skip the step of - * programming the %DP_EDP_DISPLAY_CONTROL_REGISTER, and the driver must perform the required - * implementation specific step for enabling the backlight after calling this function. + * Note that certain panels do not support being enabled or disabled via DPCD, but instead require + * that the driver handle enabling/disabling the panel through implementation-specific means using + * the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false, + * this function becomes a no-op, and the driver is expected to handle powering the panel on using + * the EDP_BL_PWR GPIO. * * Returns: %0 on success, negative error code on failure. */ @@ -3214,27 +3367,18 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli const u16 level) { int ret; - u8 dpcd_buf, new_dpcd_buf; - - ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf); - if (ret != 1) { - drm_dbg_kms(aux->drm_dev, - "%s: Failed to read backlight mode: %d\n", aux->name, ret); - return ret < 0 ? 
ret : -EIO; - } - - new_dpcd_buf = dpcd_buf; + u8 dpcd_buf; - if ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) != DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { - new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; - new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + if (bl->aux_set) + dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + else + dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM; - if (bl->pwmgen_bit_count) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); - if (ret != 1) - drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", - aux->name, ret); - } + if (bl->pwmgen_bit_count) { + ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); + if (ret != 1) + drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", + aux->name, ret); } if (bl->pwm_freq_pre_divider) { @@ -3244,16 +3388,14 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli "%s: Failed to write aux backlight frequency: %d\n", aux->name, ret); else - new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; + dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; } - if (new_dpcd_buf != dpcd_buf) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf); - if (ret != 1) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n", - aux->name, ret); - return ret < 0 ? ret : -EIO; - } + ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf); + if (ret != 1) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n", + aux->name, ret); + return ret < 0 ? ret : -EIO; } ret = drm_edp_backlight_set_level(aux, bl, level); @@ -3272,12 +3414,13 @@ EXPORT_SYMBOL(drm_edp_backlight_enable); * @aux: The DP AUX channel to use * @bl: Backlight capability info from drm_edp_backlight_init() * - * This function handles disabling DPCD backlight controls on a panel over AUX. Note that some - * panels have backlights that are enabled/disabled by other means, despite having their brightness - * values controlled through DPCD. On such panels &drm_edp_backlight_info.aux_enable will be set to - * %false, this function will become a no-op (and we will skip updating - * %DP_EDP_DISPLAY_CONTROL_REGISTER), and the driver must take care to perform it's own - * implementation specific step for disabling the backlight. + * This function handles disabling DPCD backlight controls on a panel over AUX. + * + * Note that certain panels do not support being enabled or disabled via DPCD, but instead require + * that the driver handle enabling/disabling the panel through implementation-specific means using + * the EDP_BL_PWR GPIO. For such panels, &drm_edp_backlight_info.aux_enable will be set to %false, + * this function becomes a no-op, and the driver is expected to handle powering the panel off using + * the EDP_BL_PWR GPIO. * * Returns: %0 on success or no-op, negative error code on failure. 
*/ @@ -3301,6 +3444,9 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf int ret; u8 pn, pn_min, pn_max; + if (!bl->aux_set) + return 0; + ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); if (ret != 1) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", @@ -3386,7 +3532,7 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf } static inline int -drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, +drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, u8 *current_mode) { int ret; @@ -3401,6 +3547,9 @@ drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_i } *current_mode = (mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK); + if (!bl->aux_set) + return 0; + if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { int size = 1 + bl->lsb_reg_used; @@ -3431,7 +3580,7 @@ drm_edp_backlight_probe_level(struct drm_dp_aux *aux, struct drm_edp_backlight_i * @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight * @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz * @edp_dpcd: A cached copy of the eDP DPCD - * @current_level: Where to store the probed brightness level + * @current_level: Where to store the probed brightness level, if any * @current_mode: Where to store the currently set backlight control mode * * Initializes a &drm_edp_backlight_info struct by probing @aux for its backlight capabilities, @@ -3451,24 +3600,38 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl if (edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) bl->aux_enable = true; + if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) + bl->aux_set = true; if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) bl->lsb_reg_used = true; + + /* Sanity check caps */ + if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + drm_dbg_kms(aux->drm_dev, + "%s: Panel supports neither AUX nor PWM brightness control?
Aborting\n", + aux->name); + return -EINVAL; + } + ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd); if (ret < 0) return ret; - ret = drm_edp_backlight_probe_level(aux, bl, current_mode); + ret = drm_edp_backlight_probe_state(aux, bl, current_mode); if (ret < 0) return ret; *current_level = ret; drm_dbg_kms(aux->drm_dev, - "%s: Found backlight level=%d/%d pwm_freq_pre_divider=%d mode=%x\n", - aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider, *current_mode); - drm_dbg_kms(aux->drm_dev, - "%s: Backlight caps: pwmgen_bit_count=%d lsb_reg_used=%d aux_enable=%d\n", - aux->name, bl->pwmgen_bit_count, bl->lsb_reg_used, bl->aux_enable); + "%s: Found backlight: aux_set=%d aux_enable=%d mode=%d\n", + aux->name, bl->aux_set, bl->aux_enable, *current_mode); + if (bl->aux_set) { + drm_dbg_kms(aux->drm_dev, + "%s: Backlight caps: level=%d/%d pwm_freq_pre_divider=%d lsb_reg_used=%d\n", + aux->name, *current_level, bl->max, bl->pwm_freq_pre_divider, + bl->lsb_reg_used); + } + return 0; } EXPORT_SYMBOL(drm_edp_backlight_init); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 86d13d6bc463..f3d79eda94bb 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1668,13 +1668,10 @@ __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, for (i = 0; i < history->len; i++) { const struct drm_dp_mst_topology_ref_entry *entry = &history->entries[i]; - ulong *entries; - uint nr_entries; u64 ts_nsec = entry->ts_nsec; u32 rem_nsec = do_div(ts_nsec, 1000000000); - nr_entries = stack_depot_fetch(entry->backtrace, &entries); - stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); + stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4); drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s", entry->count, @@ -3355,6 +3352,10 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, /** * drm_dp_update_payload_part1() - Execute payload update part 1 * @mgr: manager to use. + * @start_slot: this is the cur slot + * + * NOTE: start_slot is a temporary workaround for non-atomic drivers, + * this will be removed when non-atomic mst helpers are moved out of the helper * * This iterates over all proposed virtual channels, and tries to * allocate space in the link for them. For 0->slots transitions, @@ -3365,12 +3366,12 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, * after calling this the driver should generate ACT and payload * packets. */ -int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot) { struct drm_dp_payload req_payload; struct drm_dp_mst_port *port; int i, j; - int cur_slots = 1; + int cur_slots = start_slot; bool skip; mutex_lock(&mgr->payload_lock); @@ -4334,10 +4335,6 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, { int ret; - /* max. 
time slots - one slot for MTP header */ - if (slots > 63) - return -ENOSPC; - vcpi->pbn = pbn; vcpi->aligned_pbn = slots * mgr->pbn_div; vcpi->num_slots = slots; @@ -4510,6 +4507,27 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots); /** + * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format + * @mst_state: mst_state to update + * @link_encoding_cap: the encoding format on the link + */ +void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap) +{ + if (link_encoding_cap == DP_CAP_ANSI_128B132B) { + mst_state->total_avail_slots = 64; + mst_state->start_slot = 0; + } else { + mst_state->total_avail_slots = 63; + mst_state->start_slot = 1; + } + + DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n", + (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b", + mst_state); +} +EXPORT_SYMBOL(drm_dp_mst_update_slots); + +/** * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel * @mgr: manager for this port * @port: port to allocate a virtual channel for. @@ -4540,7 +4558,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); if (ret) { - drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n", + drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), ret); drm_dp_mst_topology_put_port(port); goto out; @@ -5228,7 +5246,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_topology_state *mst_state) { struct drm_dp_vcpi_allocation *vcpi; - int avail_slots = 63, payload_count = 0; + int avail_slots = mst_state->total_avail_slots, payload_count = 0; list_for_each_entry(vcpi, &mst_state->vcpis, next) { /* Releasing VCPI is always OK-even if the port is gone */ @@ -5257,7 +5275,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, } } drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", - mgr, mst_state, avail_slots, 63 - avail_slots); + mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots); return 0; } @@ -5534,6 +5552,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, if (mst_state == NULL) return -ENOMEM; + mst_state->total_avail_slots = 63; + mst_state->start_slot = 1; + mst_state->mgr = mgr; INIT_LIST_HEAD(&mst_state->vcpis); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 7a5097467ba5..8214a0b1ab7f 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -43,6 +43,7 @@ #include <drm/drm_managed.h> #include <drm/drm_mode_object.h> #include <drm/drm_print.h> +#include <drm/drm_privacy_screen_machine.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -581,6 +582,7 @@ static int drm_dev_init(struct drm_device *dev, const struct drm_driver *driver, struct device *parent) { + struct inode *inode; int ret; if (!drm_core_init_complete) { @@ -617,13 +619,15 @@ static int drm_dev_init(struct drm_device *dev, if (ret) return ret; - dev->anon_inode = drm_fs_inode_new(); - if (IS_ERR(dev->anon_inode)) { - ret = PTR_ERR(dev->anon_inode); + inode = drm_fs_inode_new(); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); goto err; } + dev->anon_inode = inode; + if (drm_core_check_feature(dev, DRIVER_RENDER)) { ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); if (ret) @@ -1029,6 +1033,7
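drm_dp_mst_update_slots() above is how a driver opts its topology state into the 128b/132b layout (64 slots starting at slot 0, since UHBR links have no MTP header slot) instead of the 8b/10b default seeded in drm_dp_mst_topology_mgr_init(). A hedged sketch of the atomic_check call site; how the driver decides it is running UHBR is driver-specific, and the DP_CAP_ANSI_* codes come from the DPCD:

#include <drm/drm_dp_mst_helper.h>

static int my_mst_atomic_check(struct drm_atomic_state *state,
			       struct drm_dp_mst_topology_mgr *mgr,
			       bool uhbr)
{
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_mst_topology_state(state, mgr);

	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* Pick the slot layout for this commit from the channel coding. */
	drm_dp_mst_update_slots(mst_state, uhbr ? DP_CAP_ANSI_128B132B :
						  DP_CAP_ANSI_8B10B);
	return 0;
}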
@@ static const struct file_operations drm_stub_fops = { static void drm_core_exit(void) { + drm_privacy_screen_lookup_exit(); unregister_chrdev(DRM_MAJOR, "drm"); debugfs_remove(drm_debugfs_root); drm_sysfs_destroy(); @@ -1056,6 +1061,8 @@ static int __init drm_core_init(void) if (ret < 0) goto error; + drm_privacy_screen_lookup_init(); + drm_core_init_complete = true; DRM_DEBUG("Initialized\n"); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9c9463ec5465..12893e7be89b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -28,6 +28,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include <linux/bitfield.h> #include <linux/hdmi.h> #include <linux/i2c.h> #include <linux/kernel.h> @@ -49,6 +50,11 @@ (((edid)->version > (maj)) || \ ((edid)->version == (maj) && (edid)->revision > (min))) +static int oui(u8 first, u8 second, u8 third) +{ + return (first << 16) | (second << 8) | third; +} + #define EDID_EST_TIMINGS 16 #define EDID_STD_TIMINGS 8 #define EDID_DETAILED_TIMINGS 4 @@ -1840,11 +1846,20 @@ static void connector_bad_edid(struct drm_connector *connector, u8 *edid, int num_blocks) { int i; - u8 num_of_ext = edid[0x7e]; + u8 last_block; + + /* + * 0x7e in the EDID is the number of extension blocks. The EDID + * is 1 (base block) + num_ext_blocks big. That means we can think + * of 0x7e in the EDID of the _index_ of the last block in the + * combined chunk of memory. + */ + last_block = edid[0x7e]; /* Calculate real checksum for the last edid extension block data */ - connector->real_edid_checksum = - drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH); + if (last_block < num_blocks) + connector->real_edid_checksum = + drm_edid_block_checksum(edid + last_block * EDID_LENGTH); if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS)) return; @@ -4187,32 +4202,24 @@ cea_db_offsets(const u8 *cea, int *start, int *end) static bool cea_db_is_hdmi_vsdb(const u8 *db) { - int hdmi_id; - if (cea_db_tag(db) != VENDOR_BLOCK) return false; if (cea_db_payload_len(db) < 5) return false; - hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); - - return hdmi_id == HDMI_IEEE_OUI; + return oui(db[3], db[2], db[1]) == HDMI_IEEE_OUI; } static bool cea_db_is_hdmi_forum_vsdb(const u8 *db) { - unsigned int oui; - if (cea_db_tag(db) != VENDOR_BLOCK) return false; if (cea_db_payload_len(db) < 7) return false; - oui = db[3] << 16 | db[2] << 8 | db[1]; - - return oui == HDMI_FORUM_IEEE_OUI; + return oui(db[3], db[2], db[1]) == HDMI_FORUM_IEEE_OUI; } static bool cea_db_is_vcdb(const u8 *db) @@ -5222,6 +5229,71 @@ void drm_get_monitor_range(struct drm_connector *connector, info->monitor_range.max_vfreq); } +static void drm_parse_vesa_mso_data(struct drm_connector *connector, + const struct displayid_block *block) +{ + struct displayid_vesa_vendor_specific_block *vesa = + (struct displayid_vesa_vendor_specific_block *)block; + struct drm_display_info *info = &connector->display_info; + + if (block->num_bytes < 3) { + drm_dbg_kms(connector->dev, "Unexpected vendor block size %u\n", + block->num_bytes); + return; + } + + if (oui(vesa->oui[0], vesa->oui[1], vesa->oui[2]) != VESA_IEEE_OUI) + return; + + if (sizeof(*vesa) != sizeof(*block) + block->num_bytes) { + drm_dbg_kms(connector->dev, "Unexpected VESA vendor block size\n"); + return; + } + + switch (FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso)) { + default: + drm_dbg_kms(connector->dev, "Reserved MSO mode value\n"); + fallthrough; + case 0: + info->mso_stream_count = 0; + break; + case 1: + info->mso_stream_count = 2; 
/* 2 or 4 links */ + break; + case 2: + info->mso_stream_count = 4; /* 4 links */ + break; + } + + if (!info->mso_stream_count) { + info->mso_pixel_overlap = 0; + return; + } + + info->mso_pixel_overlap = FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso); + if (info->mso_pixel_overlap > 8) { + drm_dbg_kms(connector->dev, "Reserved MSO pixel overlap value %u\n", + info->mso_pixel_overlap); + info->mso_pixel_overlap = 8; + } + + drm_dbg_kms(connector->dev, "MSO stream count %u, pixel overlap %u\n", + info->mso_stream_count, info->mso_pixel_overlap); +} + +static void drm_update_mso(struct drm_connector *connector, const struct edid *edid) +{ + const struct displayid_block *block; + struct displayid_iter iter; + + displayid_iter_edid_begin(edid, &iter); + displayid_iter_for_each(block, &iter) { + if (block->tag == DATA_BLOCK_2_VENDOR_SPECIFIC) + drm_parse_vesa_mso_data(connector, block); + } + displayid_iter_end(&iter); +} + /* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset * all of the values which would have been set from EDID */ @@ -5245,6 +5317,9 @@ drm_reset_display_info(struct drm_connector *connector) info->non_desktop = 0; memset(&info->monitor_range, 0, sizeof(info->monitor_range)); + + info->mso_stream_count = 0; + info->mso_pixel_overlap = 0; } u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid) @@ -5323,6 +5398,9 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; + + drm_update_mso(connector, edid); + return quirks; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 3ab078321045..9727a59d35fd 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1506,6 +1506,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; + struct drm_mode_config *config = &dev->mode_config; int ret = 0; int crtc_count = 0; struct drm_connector_list_iter conn_iter; @@ -1663,6 +1664,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, /* Handle our overallocation */ sizes.surface_height *= drm_fbdev_overalloc; sizes.surface_height /= 100; + if (sizes.surface_height > config->max_height) { + drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n", + config->max_height); + sizes.surface_height = config->max_height; + } /* push down into drivers */ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); @@ -2332,7 +2338,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, return PTR_ERR(fbi); fbi->fbops = &drm_fbdev_fb_ops; - fbi->screen_size = fb->height * fb->pitches[0]; + fbi->screen_size = sizes->surface_height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; drm_fb_helper_fill_info(fbi, fb_helper, sizes); diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 69fde60e36b3..0f28dd2bdd72 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -17,71 +17,91 @@ #include <drm/drm_fourcc.h> #include <drm/drm_rect.h> -static unsigned int clip_offset(struct drm_rect *clip, - unsigned int pitch, unsigned int cpp) +static unsigned int clip_offset(const struct drm_rect *clip, unsigned int pitch, unsigned int cpp) { return clip->y1 
* pitch + clip->x1 * cpp; } /** + * drm_fb_clip_offset - Returns the clipping rectangles byte-offset in a framebuffer + * @pitch: Framebuffer line pitch in byte + * @format: Framebuffer format + * @clip: Clip rectangle + * + * Returns: + * The byte offset of the clip rectangle's top-left corner within the framebuffer. + */ +unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format, + const struct drm_rect *clip) +{ + return clip_offset(clip, pitch, format->cpp[0]); +} +EXPORT_SYMBOL(drm_fb_clip_offset); + +/** * drm_fb_memcpy - Copy clip buffer * @dst: Destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: Source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. + * is at the top-left corner. */ -void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_memcpy(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { unsigned int cpp = fb->format->cpp[0]; size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y, lines = clip->y2 - clip->y1; + if (!dst_pitch) + dst_pitch = len; + vaddr += clip_offset(clip, fb->pitches[0], cpp); for (y = 0; y < lines; y++) { memcpy(dst, vaddr, len); vaddr += fb->pitches[0]; - dst += len; + dst += dst_pitch; } } EXPORT_SYMBOL(drm_fb_memcpy); /** - * drm_fb_memcpy_dstclip - Copy clip buffer + * drm_fb_memcpy_toio - Copy clip buffer * @dst: Destination buffer (iomem) * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: Source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * - * This function applies clipping on dst, i.e. the destination is a - * full (iomem) framebuffer but only the clip rect content is copied over. + * This function does not apply clipping on dst, i.e. the destination + * is at the top-left corner. */ -void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_memcpy_toio(void __iomem *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { unsigned int cpp = fb->format->cpp[0]; - unsigned int offset = clip_offset(clip, dst_pitch, cpp); size_t len = (clip->x2 - clip->x1) * cpp; unsigned int y, lines = clip->y2 - clip->y1; - vaddr += offset; - dst += offset; + if (!dst_pitch) + dst_pitch = len; + + vaddr += clip_offset(clip, fb->pitches[0], cpp); for (y = 0; y < lines; y++) { memcpy_toio(dst, vaddr, len); vaddr += fb->pitches[0]; dst += dst_pitch; } } -EXPORT_SYMBOL(drm_fb_memcpy_dstclip); +EXPORT_SYMBOL(drm_fb_memcpy_toio); /** * drm_fb_swab - Swap bytes into clip buffer * @dst: Destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @src: Source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -91,21 +111,27 @@ EXPORT_SYMBOL(drm_fb_memcpy_dstclip); * time to speed up slow uncached reads. * * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. + * is at the top-left corner. 
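+ *
+ * A dst_pitch of 0 selects a tightly packed destination, i.e. the pitch
+ * defaults to the clip width times the bytes per pixel. A hedged usage
+ * sketch, assuming a driver-owned shadow buffer sized to the clip::
+ *
+ *	struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);
+ *
+ *	drm_fb_swab(shadow, 0, vaddr, fb, &clip, false);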
*/ -void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip, bool cached) +void drm_fb_swab(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip, + bool cached) { u8 cpp = fb->format->cpp[0]; size_t len = drm_rect_width(clip) * cpp; - u16 *src16, *dst16 = dst; - u32 *src32, *dst32 = dst; + const u16 *src16; + const u32 *src32; + u16 *dst16; + u32 *dst32; unsigned int x, y; void *buf = NULL; if (WARN_ON_ONCE(cpp != 2 && cpp != 4)) return; + if (!dst_pitch) + dst_pitch = len; + if (!cached) buf = kmalloc(len, GFP_KERNEL); @@ -121,6 +147,9 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, src32 = src; } + dst16 = dst; + dst32 = dst; + for (x = clip->x1; x < clip->x2; x++) { if (cpp == 4) *dst32++ = swab32(*src32++); @@ -129,13 +158,14 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb, } src += fb->pitches[0]; + dst += dst_pitch; } kfree(buf); } EXPORT_SYMBOL(drm_fb_swab); -static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int pixels) +static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, const __le32 *sbuf, unsigned int pixels) { unsigned int x; u32 pix; @@ -151,23 +181,24 @@ static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int /** * drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer * @dst: RGB332 destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @src: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * Drivers can use this function for RGB332 devices that don't natively support XRGB8888. - * - * This function does not apply clipping on dst, i.e. the destination is a small buffer - * containing the clip rect only. */ -void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_rgb332(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { size_t width = drm_rect_width(clip); size_t src_len = width * sizeof(u32); unsigned int y; void *sbuf; + if (!dst_pitch) + dst_pitch = width; + /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */ sbuf = kmalloc(src_len, GFP_KERNEL); if (!sbuf) @@ -178,14 +209,14 @@ void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb, memcpy(sbuf, src, src_len); drm_fb_xrgb8888_to_rgb332_line(dst, sbuf, width); src += fb->pitches[0]; - dst += width; + dst += dst_pitch; } kfree(sbuf); } EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332); -static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf, +static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, const u32 *sbuf, unsigned int pixels, bool swab) { @@ -206,6 +237,7 @@ static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf, /** * drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer * @dst: RGB565 destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -213,13 +245,10 @@ static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf, * * Drivers can use this function for RGB565 devices that don't natively * support XRGB8888. - * - * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. 
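+ *
+ * A dst_pitch of 0 defaults to the clip width times two bytes per RGB565
+ * pixel. An illustrative call (shadow and vaddr are driver-provided names,
+ * not part of this patch)::
+ *
+ *	drm_fb_xrgb8888_to_rgb565(shadow, 0, vaddr, fb, &clip, false);
+ *
+ * Pass swab=true for devices that expect the 16-bit values byte-swapped.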
*/ -void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, - struct drm_framebuffer *fb, - struct drm_rect *clip, bool swab) +void drm_fb_xrgb8888_to_rgb565(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip, + bool swab) { size_t linepixels = clip->x2 - clip->x1; size_t src_len = linepixels * sizeof(u32); @@ -227,6 +256,9 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, unsigned y, lines = clip->y2 - clip->y1; void *sbuf; + if (!dst_pitch) + dst_pitch = dst_len; + /* * The cma memory is write-combined so reads are uncached. * Speed up by fetching one line at a time. @@ -240,7 +272,7 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, memcpy(sbuf, vaddr, src_len); drm_fb_xrgb8888_to_rgb565_line(dst, sbuf, linepixels, swab); vaddr += fb->pitches[0]; - dst += dst_len; + dst += dst_pitch; } kfree(sbuf); @@ -248,9 +280,9 @@ void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr, EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); /** - * drm_fb_xrgb8888_to_rgb565_dstclip - Convert XRGB8888 to RGB565 clip buffer + * drm_fb_xrgb8888_to_rgb565_toio - Convert XRGB8888 to RGB565 clip buffer * @dst: RGB565 destination buffer (iomem) - * @dst_pitch: destination buffer pitch + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -258,37 +290,36 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); * * Drivers can use this function for RGB565 devices that don't natively * support XRGB8888. - * - * This function applies clipping on dst, i.e. the destination is a - * full (iomem) framebuffer but only the clip rect content is copied over. */ -void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip, bool swab) +void drm_fb_xrgb8888_to_rgb565_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip, bool swab) { size_t linepixels = clip->x2 - clip->x1; size_t dst_len = linepixels * sizeof(u16); unsigned y, lines = clip->y2 - clip->y1; void *dbuf; + if (!dst_pitch) + dst_pitch = dst_len; + dbuf = kmalloc(dst_len, GFP_KERNEL); if (!dbuf) return; vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); - dst += clip_offset(clip, dst_pitch, sizeof(u16)); for (y = 0; y < lines; y++) { drm_fb_xrgb8888_to_rgb565_line(dbuf, vaddr, linepixels, swab); memcpy_toio(dst, dbuf, dst_len); vaddr += fb->pitches[0]; - dst += dst_len; + dst += dst_pitch; } kfree(dbuf); } -EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_dstclip); +EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_toio); -static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf, +static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, const u32 *sbuf, unsigned int pixels) { unsigned int x; @@ -303,24 +334,25 @@ static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf, /** * drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer * @dst: RGB888 destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @src: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * Drivers can use this function for RGB888 devices that don't natively * support XRGB8888. - * - * This function does not apply clipping on dst, i.e. the destination - * is a small buffer containing the clip rect only. 
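+ *
+ * The tightly packed default here is three bytes per pixel, so a dst_pitch
+ * of 0 behaves as if drm_rect_width(clip) * 3 had been passed::
+ *
+ *	drm_fb_xrgb8888_to_rgb888(dst, 0, src, fb, &clip);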
*/ -void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_rgb888(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { size_t width = drm_rect_width(clip); size_t src_len = width * sizeof(u32); unsigned int y; void *sbuf; + if (!dst_pitch) + dst_pitch = width * 3; + /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */ sbuf = kmalloc(src_len, GFP_KERNEL); if (!sbuf) @@ -331,7 +363,7 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb, memcpy(sbuf, src, src_len); drm_fb_xrgb8888_to_rgb888_line(dst, sbuf, width); src += fb->pitches[0]; - dst += width * 3; + dst += dst_pitch; } kfree(sbuf); @@ -339,48 +371,103 @@ void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb, EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); /** - * drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer + * drm_fb_xrgb8888_to_rgb888_toio - Convert XRGB8888 to RGB888 clip buffer * @dst: RGB565 destination buffer (iomem) - * @dst_pitch: destination buffer pitch + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * * Drivers can use this function for RGB888 devices that don't natively * support XRGB8888. - * - * This function applies clipping on dst, i.e. the destination is a - * full (iomem) framebuffer but only the clip rect content is copied over. */ -void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch, - void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_rgb888_toio(void __iomem *dst, unsigned int dst_pitch, + const void *vaddr, const struct drm_framebuffer *fb, + const struct drm_rect *clip) { size_t linepixels = clip->x2 - clip->x1; size_t dst_len = linepixels * 3; unsigned y, lines = clip->y2 - clip->y1; void *dbuf; + if (!dst_pitch) + dst_pitch = dst_len; + dbuf = kmalloc(dst_len, GFP_KERNEL); if (!dbuf) return; vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); - dst += clip_offset(clip, dst_pitch, sizeof(u16)); for (y = 0; y < lines; y++) { drm_fb_xrgb8888_to_rgb888_line(dbuf, vaddr, linepixels); memcpy_toio(dst, dbuf, dst_len); vaddr += fb->pitches[0]; - dst += dst_len; + dst += dst_pitch; } kfree(dbuf); } -EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip); +EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_toio); + +static void drm_fb_xrgb8888_to_xrgb2101010_line(u32 *dbuf, const u32 *sbuf, + unsigned int pixels) +{ + unsigned int x; + u32 val32; + + for (x = 0; x < pixels; x++) { + val32 = ((sbuf[x] & 0x000000FF) << 2) | + ((sbuf[x] & 0x0000FF00) << 4) | + ((sbuf[x] & 0x00FF0000) << 6); + *dbuf++ = val32 | ((val32 >> 8) & 0x00300C03); + } +} + +/** + * drm_fb_xrgb8888_to_xrgb2101010_toio - Convert XRGB8888 to XRGB2101010 clip + * buffer + * @dst: XRGB2101010 destination buffer (iomem) + * @dst_pitch: Number of bytes between two consecutive scanlines within dst + * @vaddr: XRGB8888 source buffer + * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy + * + * Drivers can use this function for XRGB2101010 devices that don't natively + * support XRGB8888. 
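+ *
+ * Each 8-bit channel is widened to 10 bits by shifting it into place and
+ * replicating its two most-significant bits into the two new low-order
+ * bits, so e.g. 0xff expands to 0x3ff rather than 0x3fc and full
+ * brightness is preserved.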
+ */ +void drm_fb_xrgb8888_to_xrgb2101010_toio(void __iomem *dst, + unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, + const struct drm_rect *clip) +{ + size_t linepixels = clip->x2 - clip->x1; + size_t dst_len = linepixels * sizeof(u32); + unsigned int y, lines = clip->y2 - clip->y1; + void *dbuf; + + if (!dst_pitch) + dst_pitch = dst_len; + + dbuf = kmalloc(dst_len, GFP_KERNEL); + if (!dbuf) + return; + + vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); + for (y = 0; y < lines; y++) { + drm_fb_xrgb8888_to_xrgb2101010_line(dbuf, vaddr, linepixels); + memcpy_toio(dst, dbuf, dst_len); + vaddr += fb->pitches[0]; + dst += dst_pitch; + } + + kfree(dbuf); +} +EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010_toio); /** * drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale * @dst: 8-bit grayscale destination buffer + * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @vaddr: XRGB8888 source buffer * @fb: DRM framebuffer * @clip: Clip rectangle area to copy @@ -394,16 +481,21 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip); * * ITU BT.601 is used for the RGB -> luma (brightness) conversion. */ -void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, - struct drm_rect *clip) +void drm_fb_xrgb8888_to_gray8(void *dst, unsigned int dst_pitch, const void *vaddr, + const struct drm_framebuffer *fb, const struct drm_rect *clip) { unsigned int len = (clip->x2 - clip->x1) * sizeof(u32); unsigned int x, y; void *buf; - u32 *src; + u8 *dst8; + u32 *src32; if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888)) return; + + if (!dst_pitch) + dst_pitch = drm_rect_width(clip); + /* * The cma memory is write-combined so reads are uncached. * Speed up by fetching one line at a time. @@ -412,20 +504,22 @@ void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, if (!buf) return; + vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32)); for (y = clip->y1; y < clip->y2; y++) { - src = vaddr + (y * fb->pitches[0]); - src += clip->x1; - memcpy(buf, src, len); - src = buf; + dst8 = dst; + src32 = memcpy(buf, vaddr, len); for (x = clip->x1; x < clip->x2; x++) { - u8 r = (*src & 0x00ff0000) >> 16; - u8 g = (*src & 0x0000ff00) >> 8; - u8 b = *src & 0x000000ff; + u8 r = (*src32 & 0x00ff0000) >> 16; + u8 g = (*src32 & 0x0000ff00) >> 8; + u8 b = *src32 & 0x000000ff; /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */ - *dst++ = (3 * r + 6 * g + b) / 10; - src++; + *dst8++ = (3 * r + 6 * g + b) / 10; + src32++; } + + vaddr += fb->pitches[0]; + dst += dst_pitch; } kfree(buf); @@ -433,7 +527,7 @@ void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb, EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); /** - * drm_fb_blit_rect_dstclip - Copy parts of a framebuffer to display memory + * drm_fb_blit_toio - Copy parts of a framebuffer to display memory * @dst: The display memory to copy to * @dst_pitch: Number of bytes between two consecutive scanlines within dst * @dst_format: FOURCC code of the display's color format @@ -445,17 +539,14 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8); * formats of the display and the framebuffer mismatch, the blit function * will attempt to convert between them. * - * Use drm_fb_blit_dstclip() to copy the full framebuffer. - * * Returns: * 0 on success, or * -EINVAL if the color-format conversion failed, or * a negative error code otherwise. 
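+ *
+ * A minimal sketch for flushing a damage clip to RGB565 display memory,
+ * where vaddr_iomem and dst_pitch describe the device framebuffer and are
+ * illustrative names, not part of this patch::
+ *
+ *	ret = drm_fb_blit_toio(vaddr_iomem, dst_pitch, DRM_FORMAT_RGB565,
+ *			       vmap, fb, &damage);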
*/ -int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch, - uint32_t dst_format, void *vmap, - struct drm_framebuffer *fb, - struct drm_rect *clip) +int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_format, + const void *vmap, const struct drm_framebuffer *fb, + const struct drm_rect *clip) { uint32_t fb_format = fb->format->format; @@ -464,58 +555,32 @@ int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch, fb_format = DRM_FORMAT_XRGB8888; if (dst_format == DRM_FORMAT_ARGB8888) dst_format = DRM_FORMAT_XRGB8888; + if (fb_format == DRM_FORMAT_ARGB2101010) + fb_format = DRM_FORMAT_XRGB2101010; + if (dst_format == DRM_FORMAT_ARGB2101010) + dst_format = DRM_FORMAT_XRGB2101010; if (dst_format == fb_format) { - drm_fb_memcpy_dstclip(dst, dst_pitch, vmap, fb, clip); + drm_fb_memcpy_toio(dst, dst_pitch, vmap, fb, clip); return 0; } else if (dst_format == DRM_FORMAT_RGB565) { if (fb_format == DRM_FORMAT_XRGB8888) { - drm_fb_xrgb8888_to_rgb565_dstclip(dst, dst_pitch, - vmap, fb, clip, - false); + drm_fb_xrgb8888_to_rgb565_toio(dst, dst_pitch, vmap, fb, clip, false); return 0; } } else if (dst_format == DRM_FORMAT_RGB888) { if (fb_format == DRM_FORMAT_XRGB8888) { - drm_fb_xrgb8888_to_rgb888_dstclip(dst, dst_pitch, - vmap, fb, clip); + drm_fb_xrgb8888_to_rgb888_toio(dst, dst_pitch, vmap, fb, clip); + return 0; + } + } else if (dst_format == DRM_FORMAT_XRGB2101010) { + if (fb_format == DRM_FORMAT_XRGB8888) { + drm_fb_xrgb8888_to_xrgb2101010_toio(dst, dst_pitch, vmap, fb, clip); return 0; } } return -EINVAL; } -EXPORT_SYMBOL(drm_fb_blit_rect_dstclip); - -/** - * drm_fb_blit_dstclip - Copy framebuffer to display memory - * @dst: The display memory to copy to - * @dst_pitch: Number of bytes between two consecutive scanlines within dst - * @dst_format: FOURCC code of the display's color format - * @vmap: The framebuffer memory to copy from - * @fb: The framebuffer to copy from - * - * This function copies a full framebuffer to display memory. If the formats - * of the display and the framebuffer mismatch, the copy function will - * attempt to convert between them. - * - * See drm_fb_blit_rect_dstclip() for more information. - * - * Returns: - * 0 on success, or a negative error code otherwise. 
- */ -int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch, - uint32_t dst_format, void *vmap, - struct drm_framebuffer *fb) -{ - struct drm_rect fullscreen = { - .x1 = 0, - .x2 = fb->width, - .y1 = 0, - .y2 = fb->height, - }; - return drm_fb_blit_rect_dstclip(dst, dst_pitch, dst_format, vmap, fb, - &fullscreen); -} -EXPORT_SYMBOL(drm_fb_blit_dstclip); +EXPORT_SYMBOL(drm_fb_blit_toio); diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 783844bfecc1..07741b678798 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -134,6 +134,8 @@ const struct drm_format_info *__drm_format_info(u32 format) static const struct drm_format_info formats[] = { { .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, + { .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 }, { .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 }, @@ -267,6 +269,9 @@ const struct drm_format_info *__drm_format_info(u32 format) .num_planes = 3, .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, .vsub = 0, .is_yuv = true }, + { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, + .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, + .hsub = 2, .vsub = 2, .is_yuv = true}, }; unsigned int i; diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index e570398abd78..c3189afe10cb 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -143,6 +143,7 @@ */ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { + struct dma_resv_iter cursor; struct drm_gem_object *obj; struct dma_fence *fence; @@ -150,9 +151,18 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st return 0; obj = drm_gem_fb_get_obj(state->fb, 0); - fence = dma_resv_get_excl_unlocked(obj->resv); - drm_atomic_set_fence_for_plane(state, fence); + dma_resv_iter_begin(&cursor, obj->resv, false); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + /* TODO: Currently there should be only one write fence, so this + * here works fine. But drm_atomic_set_fence_for_plane() should + * be changed to be able to handle more fences in general for + * multiple BOs per fb anyway. 
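+ *
+ * The unlocked iterator may restart if it races with a concurrent
+ * reservation update, so the reference is taken inside the loop
+ * before breaking out; dma_fence_get() below keeps the fence valid
+ * after dma_resv_iter_end() releases the iteration state.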
*/ + dma_fence_get(fence); + break; + } + dma_resv_iter_end(&cursor); + drm_atomic_set_fence_for_plane(state, fence); return 0; } EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index d53388199f34..cefd0cbf9deb 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -13,6 +13,7 @@ #include <linux/dma-mapping.h> #include <linux/export.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -31,14 +32,18 @@ * The DRM GEM/CMA helpers use this allocator as a means to provide buffer * objects that are physically contiguous in memory. This is useful for * display drivers that are unable to map scattered buffers via an IOMMU. + * + * For GEM callback helpers in struct &drm_gem_object functions, see likewise + * named functions with an _object_ infix (e.g., drm_gem_cma_object_vmap() wraps + * drm_gem_cma_vmap()). These helpers perform the necessary type conversion. */ static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { - .free = drm_gem_cma_free_object, - .print_info = drm_gem_cma_print_info, - .get_sg_table = drm_gem_cma_get_sg_table, - .vmap = drm_gem_cma_vmap, - .mmap = drm_gem_cma_mmap, + .free = drm_gem_cma_object_free, + .print_info = drm_gem_cma_object_print_info, + .get_sg_table = drm_gem_cma_object_get_sg_table, + .vmap = drm_gem_cma_object_vmap, + .mmap = drm_gem_cma_object_mmap, .vm_ops = &drm_gem_cma_vm_ops, }; @@ -62,18 +67,21 @@ __drm_gem_cma_create(struct drm_device *drm, size_t size, bool private) struct drm_gem_object *gem_obj; int ret = 0; - if (drm->driver->gem_create_object) + if (drm->driver->gem_create_object) { gem_obj = drm->driver->gem_create_object(drm, size); - else - gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); - if (!gem_obj) - return ERR_PTR(-ENOMEM); + if (IS_ERR(gem_obj)) + return ERR_CAST(gem_obj); + cma_obj = to_drm_gem_cma_obj(gem_obj); + } else { + cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); + if (!cma_obj) + return ERR_PTR(-ENOMEM); + gem_obj = &cma_obj->base; + } if (!gem_obj->funcs) gem_obj->funcs = &drm_gem_cma_default_funcs; - cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base); - if (private) { drm_gem_private_object_init(drm, gem_obj, size); @@ -191,18 +199,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, } /** - * drm_gem_cma_free_object - free resources associated with a CMA GEM object - * @gem_obj: GEM object to free + * drm_gem_cma_free - free resources associated with a CMA GEM object + * @cma_obj: CMA GEM object to free * * This function frees the backing memory of the CMA GEM object, cleans up the * GEM object state and frees the memory used to store the object itself. * If the buffer is imported and the virtual address is set, it is released. - * Drivers using the CMA helpers should set this as their - * &drm_gem_object_funcs.free callback. 
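+ *
+ * Drivers now reach this through the drm_gem_cma_object_free() wrapper
+ * installed in the default function table above; the wrapper pattern is
+ * roughly (see the helper header for the canonical version)::
+ *
+ *	static void drm_gem_cma_object_free(struct drm_gem_object *obj)
+ *	{
+ *		struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+ *
+ *		drm_gem_cma_free(cma_obj);
+ *	}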
*/ -void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) +void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj) { - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj); + struct drm_gem_object *gem_obj = &cma_obj->base; struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr); if (gem_obj->import_attach) { @@ -210,15 +216,20 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, cma_obj->sgt); } else if (cma_obj->vaddr) { - dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, - cma_obj->vaddr, cma_obj->paddr); + if (cma_obj->map_noncoherent) + dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr, + DMA_TO_DEVICE); + else + dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr); } drm_gem_object_release(gem_obj); kfree(cma_obj); } -EXPORT_SYMBOL_GPL(drm_gem_cma_free_object); +EXPORT_SYMBOL_GPL(drm_gem_cma_free); /** * drm_gem_cma_dumb_create_internal - create a dumb buffer object @@ -365,18 +376,15 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area); /** * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs + * @cma_obj: CMA GEM object * @p: DRM printer * @indent: Tab indentation level - * @obj: GEM object * - * This function can be used as the &drm_driver->gem_print_info callback. - * It prints paddr and vaddr for use in e.g. debugfs output. + * This function prints paddr and vaddr for use in e.g. debugfs output. */ -void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj) +void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, + struct drm_printer *p, unsigned int indent) { - const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); - drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr); drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr); } @@ -385,18 +393,17 @@ EXPORT_SYMBOL(drm_gem_cma_print_info); /** * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned * pages for a CMA GEM object - * @obj: GEM object + * @cma_obj: CMA GEM object * - * This function exports a scatter/gather table by - * calling the standard DMA mapping API. Drivers using the CMA helpers should - * set this as their &drm_gem_object_funcs.get_sg_table callback. + * This function exports a scatter/gather table by calling the standard + * DMA mapping API. * * Returns: * A pointer to the scatter/gather table of pinned pages or NULL on failure. */ -struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj) +struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj) { - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); + struct drm_gem_object *obj = &cma_obj->base; struct sg_table *sgt; int ret; @@ -462,23 +469,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); /** * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual * address space - * @obj: GEM object + * @cma_obj: CMA GEM object * @map: Returns the kernel virtual address of the CMA GEM object's backing * store. * - * This function maps a buffer into the kernel's - * virtual address space. Since the CMA buffers are already mapped into the - * kernel virtual address space this simply returns the cached virtual - * address. Drivers using the CMA helpers should set this as their DRM - * driver's &drm_gem_object_funcs.vmap callback. 
+ * This function maps a buffer into the kernel's virtual address space. + * Since the CMA buffers are already mapped into the kernel virtual address + * space this simply returns the cached virtual address. * * Returns: * 0 on success, or a negative error code otherwise. */ -int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map) { - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); - dma_buf_map_set_vaddr(map, cma_obj->vaddr); return 0; @@ -487,20 +490,19 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_vmap); /** * drm_gem_cma_mmap - memory-map an exported CMA GEM object - * @obj: GEM object + * @cma_obj: CMA GEM object * @vma: VMA for the area to be mapped * * This function maps a buffer into a userspace process's address space. * In addition to the usual GEM VMA setup it immediately faults in the entire - * object instead of using on-demand faulting. Drivers that use the CMA - * helpers should set this as their &drm_gem_object_funcs.mmap callback. + * object instead of using on-demand faulting. * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma) { - struct drm_gem_cma_object *cma_obj; + struct drm_gem_object *obj = &cma_obj->base; int ret; /* @@ -511,8 +513,6 @@ int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); vma->vm_flags &= ~VM_PFNMAP; - cma_obj = to_drm_gem_cma_obj(obj); - if (cma_obj->map_noncoherent) { vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); @@ -578,3 +578,7 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev, return obj; } EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap); + +MODULE_DESCRIPTION("DRM CMA memory-management helpers"); +MODULE_IMPORT_NS(DMA_BUF); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 3c75d79dbb65..746fd8c73845 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -6,6 +6,7 @@ */ #include <linux/slab.h> +#include <linux/module.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fb_helper.h> @@ -17,6 +18,8 @@ #include "drm_internal.h" +MODULE_IMPORT_NS(DMA_BUF); + #define AFBC_HEADER_SIZE 16 #define AFBC_TH_LAYOUT_ALIGNMENT 8 #define AFBC_HDR_ALIGN 64 diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 0e0986dfbe0c..621924116eb4 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -5,10 +5,12 @@ #include <linux/dma-buf.h> #include <linux/export.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/module.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> @@ -21,22 +23,29 @@ #include <drm/drm_prime.h> #include <drm/drm_print.h> +MODULE_IMPORT_NS(DMA_BUF); + /** * DOC: overview * * This library provides helpers for GEM objects backed by shmem buffers * allocated using anonymous pageable memory. + * + * Functions that operate on the GEM object receive struct &drm_gem_shmem_object. 
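+ * For example, a driver holding such a pointer can call
+ * drm_gem_shmem_pin(shmem) or drm_gem_shmem_vmap(shmem, &map) directly,
+ * without first casting from the embedded &drm_gem_object.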
+ * For GEM callback helpers in struct &drm_gem_object functions, see likewise + * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps + * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion. */ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { - .free = drm_gem_shmem_free_object, - .print_info = drm_gem_shmem_print_info, - .pin = drm_gem_shmem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .free = drm_gem_shmem_object_free, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; static struct drm_gem_shmem_object * @@ -48,14 +57,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) size = PAGE_ALIGN(size); - if (dev->driver->gem_create_object) + if (dev->driver->gem_create_object) { obj = dev->driver->gem_create_object(dev, size); - else - obj = kzalloc(sizeof(*shmem), GFP_KERNEL); - if (!obj) - return ERR_PTR(-ENOMEM); - - shmem = to_drm_gem_shmem_obj(obj); + if (IS_ERR(obj)) + return ERR_CAST(obj); + shmem = to_drm_gem_shmem_obj(obj); + } else { + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) + return ERR_PTR(-ENOMEM); + obj = &shmem->base; + } if (!obj->funcs) obj->funcs = &drm_gem_shmem_funcs; @@ -116,16 +128,15 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t EXPORT_SYMBOL_GPL(drm_gem_shmem_create); /** - * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object - * @obj: GEM object to free + * drm_gem_shmem_free - Free resources associated with a shmem GEM object + * @shmem: shmem GEM object to free * * This function cleans up the GEM object state and frees the memory used to - * store the object itself. It should be used to implement - * &drm_gem_object_funcs.free. + * store the object itself. */ -void drm_gem_shmem_free_object(struct drm_gem_object *obj) +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct drm_gem_object *obj = &shmem->base; WARN_ON(shmem->vmap_use_count); @@ -149,7 +160,7 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj) mutex_destroy(&shmem->vmap_lock); kfree(shmem); } -EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object); +EXPORT_SYMBOL_GPL(drm_gem_shmem_free); static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) { @@ -244,19 +255,16 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages); /** * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object - * @obj: GEM object + * @shmem: shmem GEM object * * This function makes sure the backing pages are pinned in memory while the - * buffer is exported. It should only be used to implement - * &drm_gem_object_funcs.pin. + * buffer is exported. * * Returns: * 0 on success or a negative error code on failure. 
*/ -int drm_gem_shmem_pin(struct drm_gem_object *obj) +int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - WARN_ON(shmem->base.import_attach); return drm_gem_shmem_get_pages(shmem); @@ -265,15 +273,13 @@ EXPORT_SYMBOL(drm_gem_shmem_pin); /** * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object - * @obj: GEM object + * @shmem: shmem GEM object * * This function removes the requirement that the backing pages are pinned in - * memory. It should only be used to implement &drm_gem_object_funcs.unpin. + * memory. */ -void drm_gem_shmem_unpin(struct drm_gem_object *obj) +void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - WARN_ON(shmem->base.import_attach); drm_gem_shmem_put_pages(shmem); @@ -339,20 +345,16 @@ err_zero_use: * store. * * This function makes sure that a contiguous kernel virtual address mapping - * exists for the buffer backing the shmem GEM object. - * - * This function can be used to implement &drm_gem_object_funcs.vmap. But it can - * also be called by drivers directly, in which case it will hide the - * differences between dma-buf imported and natively allocated objects. + * exists for the buffer backing the shmem GEM object. It hides the differences + * between dma-buf imported and natively allocated objects. * * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); int ret; ret = mutex_lock_interruptible(&shmem->vmap_lock); @@ -395,21 +397,18 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to * zero. * - * This function can be used to implement &drm_gem_object_funcs.vmap. But it can - * also be called by drivers directly, in which case it will hide the - * differences between dma-buf imported and natively allocated objects. + * This function hides the differences between dma-buf imported and natively + * allocated objects. */ -void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - mutex_lock(&shmem->vmap_lock); drm_gem_shmem_vunmap_locked(shmem, map); mutex_unlock(&shmem->vmap_lock); } EXPORT_SYMBOL(drm_gem_shmem_vunmap); -struct drm_gem_shmem_object * +static struct drm_gem_shmem_object * drm_gem_shmem_create_with_handle(struct drm_file *file_priv, struct drm_device *dev, size_t size, uint32_t *handle) @@ -433,15 +432,12 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, return shmem; } -EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); /* Update madvise status, returns true if not purged, else * false or -errno. 
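+ *
+ * A hedged sketch of a driver-side MADVISE ioctl handler (args and shmem
+ * are illustrative names, not part of this patch)::
+ *
+ *	args->retained = drm_gem_shmem_madvise(shmem, args->madv);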
*/ -int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv) +int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - mutex_lock(&shmem->pages_lock); if (shmem->madv >= 0) @@ -455,14 +451,14 @@ int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv) } EXPORT_SYMBOL(drm_gem_shmem_madvise); -void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) { + struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); - dma_unmap_sgtable(obj->dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); + dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); sg_free_table(shmem->sgt); kfree(shmem->sgt); shmem->sgt = NULL; @@ -481,18 +477,15 @@ void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) */ shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); - invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, - 0, (loff_t)-1); + invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } EXPORT_SYMBOL(drm_gem_shmem_purge_locked); -bool drm_gem_shmem_purge(struct drm_gem_object *obj) +bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - if (!mutex_trylock(&shmem->pages_lock)) return false; - drm_gem_shmem_purge_locked(obj); + drm_gem_shmem_purge_locked(shmem); mutex_unlock(&shmem->pages_lock); return true; @@ -600,19 +593,18 @@ static const struct vm_operations_struct drm_gem_shmem_vm_ops = { /** * drm_gem_shmem_mmap - Memory-map a shmem GEM object - * @obj: gem object + * @shmem: shmem GEM object * @vma: VMA for the area to be mapped * * This function implements an augmented version of the GEM DRM file mmap - * operation for shmem objects. Drivers which employ the shmem helpers should - * use this function as their &drm_gem_object_funcs.mmap handler. + * operation for shmem objects. * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma) { - struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj = &shmem->base; int ret; if (obj->import_attach) { @@ -623,8 +615,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) return dma_buf_mmap(obj->dma_buf, vma, 0); } - shmem = to_drm_gem_shmem_obj(obj); - ret = drm_gem_shmem_get_pages(shmem); if (ret) { drm_gem_vm_close(vma); @@ -643,17 +633,13 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap); /** * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs + * @shmem: shmem GEM object * @p: DRM printer * @indent: Tab indentation level - * @obj: GEM object - * - * This implements the &drm_gem_object_funcs.info callback. 
*/ -void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent, - const struct drm_gem_object *obj) +void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, + struct drm_printer *p, unsigned int indent) { - const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); @@ -663,12 +649,10 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info); /** * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned * pages for a shmem GEM object - * @obj: GEM object + * @shmem: shmem GEM object * * This function exports a scatter/gather table suitable for PRIME usage by - * calling the standard DMA mapping API. Drivers should not call this function - * directly, instead it should only be used as an implementation for - * &drm_gem_object_funcs.get_sg_table. + * calling the standard DMA mapping API. * * Drivers who need to acquire an scatter/gather table for objects need to call * drm_gem_shmem_get_pages_sgt() instead. @@ -676,9 +660,9 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info); * Returns: * A pointer to the scatter/gather table of pinned pages or NULL on failure. */ -struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj) +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem) { - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct drm_gem_object *obj = &shmem->base; WARN_ON(shmem->base.import_attach); @@ -689,7 +673,7 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); /** * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a * scatter/gather table for a shmem GEM object. - * @obj: GEM object + * @shmem: shmem GEM object * * This function returns a scatter/gather table suitable for driver usage. If * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg @@ -702,10 +686,10 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); * Returns: * A pointer to the scatter/gather table of pinned pages or errno on failure. 
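+ *
+ * A typical call site when binding an object to device address space;
+ * since errors come back as an ERR_PTR-encoded errno, check with IS_ERR()::
+ *
+ *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ *
+ *	if (IS_ERR(sgt))
+ *		return PTR_ERR(sgt);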
*/ -struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) +struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem) { + struct drm_gem_object *obj = &shmem->base; int ret; - struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); struct sg_table *sgt; if (shmem->sgt) @@ -717,7 +701,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj) if (ret) return ERR_PTR(ret); - sgt = drm_gem_shmem_get_sg_table(&shmem->base); + sgt = drm_gem_shmem_get_sg_table(shmem); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); goto err_put_pages; @@ -774,3 +758,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev, return &shmem->base; } EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table); + +MODULE_DESCRIPTION("DRM SHMEM memory-management helpers"); +MODULE_IMPORT_NS(DMA_BUF); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index bfa386b98134..3f00192215d1 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -197,8 +197,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev, if (dev->driver->gem_create_object) { gem = dev->driver->gem_create_object(dev, size); - if (!gem) - return ERR_PTR(-ENOMEM); + if (IS_ERR(gem)) + return ERR_CAST(gem); gbo = drm_gem_vram_of_gem(gem); } else { gbo = kzalloc(sizeof(*gbo), GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c index c50fa6f0709f..60afa1865559 100644 --- a/drivers/gpu/drm/drm_hashtab.c +++ b/drivers/gpu/drm/drm_hashtab.c @@ -32,16 +32,16 @@ * Thomas Hellström <thomas-at-tungstengraphics-dot-com> */ -#include <linux/export.h> #include <linux/hash.h> #include <linux/mm.h> #include <linux/rculist.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <drm/drm_hashtab.h> #include <drm/drm_print.h> +#include "drm_legacy.h" + int drm_ht_create(struct drm_open_hash *ht, unsigned int order) { unsigned int size = 1 << order; @@ -58,7 +58,6 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order) } return 0; } -EXPORT_SYMBOL(drm_ht_create); void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key) { @@ -135,7 +134,6 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) } return 0; } -EXPORT_SYMBOL(drm_ht_insert_item); /* * Just insert an item and return any "bits" bit key that hasn't been @@ -164,7 +162,6 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it } return 0; } -EXPORT_SYMBOL(drm_ht_just_insert_please); int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item) @@ -178,7 +175,6 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, *item = hlist_entry(list, struct drm_hash_item, head); return 0; } -EXPORT_SYMBOL(drm_ht_find_item); int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key) { @@ -197,7 +193,6 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) hlist_del_init_rcu(&item->head); return 0; } -EXPORT_SYMBOL(drm_ht_remove_item); void drm_ht_remove(struct drm_open_hash *ht) { @@ -206,4 +201,3 @@ void drm_ht_remove(struct drm_open_hash *ht) ht->table = NULL; } } -EXPORT_SYMBOL(drm_ht_remove); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 13e1d5c4ec82..d327638e15ee 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -66,7 +66,6 @@ #include "drm_internal.h" -#if IS_ENABLED(CONFIG_DRM_LEGACY) 
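+/*
+ * With drm_irq.c now compiled only for legacy DRM support, the inline
+ * IS_ENABLED(CONFIG_DRM_LEGACY) guard would always be true and can go.
+ * Non-legacy drivers bypass this midlayer entirely and request their
+ * interrupt directly, e.g. (illustrative):
+ *
+ *	ret = request_irq(irq, driver_irq_handler, 0, dev->driver->name, dev);
+ */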
static int drm_legacy_irq_install(struct drm_device *dev, int irq) { int ret; @@ -203,4 +202,3 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data, return -EINVAL; } } -#endif diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h index c9206840c87f..70c9dba114a6 100644 --- a/drivers/gpu/drm/drm_legacy.h +++ b/drivers/gpu/drm/drm_legacy.h @@ -35,9 +35,47 @@ #include <drm/drm_legacy.h> struct agp_memory; +struct drm_buf_desc; struct drm_device; struct drm_file; -struct drm_buf_desc; +struct drm_hash_item; +struct drm_open_hash; + +/* + * Hash-table Support + */ + +#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) + +/* drm_hashtab.c */ +#if IS_ENABLED(CONFIG_DRM_LEGACY) +int drm_ht_create(struct drm_open_hash *ht, unsigned int order); +int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add); +int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); + +void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +void drm_ht_remove(struct drm_open_hash *ht); +#endif + +/* + * RCU-safe interface + * + * The user of this API needs to make sure that two or more instances of the + * hash table manipulation functions are never run simultaneously. + * The lookup function drm_ht_find_item_rcu may, however, run simultaneously + * with any of the manipulation functions as long as it's called from within + * an RCU read-locked section. + */ +#define drm_ht_insert_item_rcu drm_ht_insert_item +#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please +#define drm_ht_remove_key_rcu drm_ht_remove_key +#define drm_ht_remove_item_rcu drm_ht_remove_item +#define drm_ht_find_item_rcu drm_ht_find_item /* * Generic DRM Contexts diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 71b646c4131f..ded8968b3e8a 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -15,9 +15,10 @@ #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_file.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mipi_dbi.h> #include <drm/drm_modes.h> @@ -200,30 +201,38 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, struct drm_rect *clip, bool swap) { struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); - void *src = cma_obj->vaddr; + struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + void *src; int ret; ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) return ret; + src = data[0].vaddr; /* TODO: Use mapping abstraction properly */ + + ret = drm_gem_fb_vmap(fb, map, data); + if (ret) + goto out_drm_gem_fb_end_cpu_access; switch (fb->format->format) { case DRM_FORMAT_RGB565: if (swap) - drm_fb_swab(dst, src, fb, clip, !gem->import_attach); + drm_fb_swab(dst, 0, src, fb, clip, !gem->import_attach); else - drm_fb_memcpy(dst, src, fb, clip); + drm_fb_memcpy(dst, 0, src, fb, clip); break; case DRM_FORMAT_XRGB8888: - 
drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap); + drm_fb_xrgb8888_to_rgb565(dst, 0, src, fb, clip, swap); break; default: drm_err_once(fb->dev, "Format is not supported: %p4cc\n", &fb->format->format); - return -EINVAL; + ret = -EINVAL; } + drm_gem_fb_vunmap(fb, map); +out_drm_gem_fb_end_cpu_access: drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); return ret; @@ -249,8 +258,8 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev, static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) { - struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0); - struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem); + struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; + struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev); unsigned int height = rect->y2 - rect->y1; unsigned int width = rect->x2 - rect->x1; @@ -266,6 +275,10 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) if (!drm_dev_enter(fb->dev, &idx)) return; + ret = drm_gem_fb_vmap(fb, map, data); + if (ret) + goto err_drm_dev_exit; + full = width == fb->width && height == fb->height; DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); @@ -277,7 +290,7 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect) if (ret) goto err_msg; } else { - tr = cma_obj->vaddr; + tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */ } mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1, @@ -289,6 +302,9 @@ err_msg: if (ret) drm_err_once(fb->dev, "Failed to update display %d\n", ret); + drm_gem_fb_vunmap(fb, map); + +err_drm_dev_exit: drm_dev_exit(idx); } @@ -1117,8 +1133,8 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi, /* * Even though it's not the SPI device that does DMA (the master does), - * the dma mask is necessary for the dma_alloc_wc() in - * drm_gem_cma_create(). The dma_addr returned will be a physical + * the dma mask is necessary for the dma_alloc_wc() in the GEM code + * (e.g., drm_gem_cma_create()). The dma_addr returned will be a physical * address which might be different from the bus address, but this is * not a problem since the address will not be used. 
* The virtual address is used in the transfer and the SPI core diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 93d48a6f04ab..7d1c578388d3 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -118,8 +118,6 @@ static noinline void save_stack(struct drm_mm_node *node) static void show_leaks(struct drm_mm *mm) { struct drm_mm_node *node; - unsigned long *entries; - unsigned int nr_entries; char *buf; buf = kmalloc(BUFSZ, GFP_KERNEL); @@ -133,8 +131,7 @@ static void show_leaks(struct drm_mm *mm) continue; } - nr_entries = stack_depot_fetch(node->stack, &entries); - stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); + stack_depot_snprint(node->stack, buf, BUFSZ, 0); DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", node->start, node->size, buf); } diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index 83cf0bc64eb9..c97323365675 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -291,7 +291,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock, if (ctx->trylock_only) { lockdep_assert_held(&ctx->ww_ctx); - if (!ww_mutex_trylock(&lock->mutex)) + if (!ww_mutex_trylock(&lock->mutex, NULL)) return -EBUSY; else return 0; diff --git a/drivers/gpu/drm/drm_nomodeset.c b/drivers/gpu/drm/drm_nomodeset.c new file mode 100644 index 000000000000..f3978d5bd3a1 --- /dev/null +++ b/drivers/gpu/drm/drm_nomodeset.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/module.h> +#include <linux/types.h> + +static bool drm_nomodeset; + +bool drm_firmware_drivers_only(void) +{ + return drm_nomodeset; +} +EXPORT_SYMBOL(drm_firmware_drivers_only); + +static int __init disable_modeset(char *str) +{ + drm_nomodeset = true; + + pr_warn("Booted with the nomodeset parameter. Only the system framebuffer will be available\n"); + + return 1; +} + +/* Disable kernel modesetting */ +__setup("nomodeset", disable_modeset); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 37c34146eea8..59d368ea006b 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -402,3 +402,36 @@ int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS; } EXPORT_SYMBOL_GPL(drm_of_lvds_get_dual_link_pixel_order); + +/** + * drm_of_lvds_get_data_mapping - Get LVDS data mapping + * @port: DT port node of the LVDS source or sink + * + * Convert DT "data-mapping" property string value into media bus format value. 
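+ *
+ * A hypothetical LVDS bridge or panel driver might use this as follows
+ * (sketch only; the bus_format member is an assumption, not part of this
+ * API)::
+ *
+ *     ret = drm_of_lvds_get_data_mapping(port);
+ *     if (ret < 0)
+ *             return ret;
+ *     lvds->bus_format = ret;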
+ * + * Return: + * * MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - data-mapping is "jeida-18" + * * MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA - data-mapping is "jeida-24" + * * MEDIA_BUS_FMT_RGB888_1X7X4_SPWG - data-mapping is "vesa-24" + * * -EINVAL - the "data-mapping" property is unsupported + * * -ENODEV - the "data-mapping" property is missing + */ +int drm_of_lvds_get_data_mapping(const struct device_node *port) +{ + const char *mapping; + int ret; + + ret = of_property_read_string(port, "data-mapping", &mapping); + if (ret < 0) + return -ENODEV; + + if (!strcmp(mapping, "jeida-18")) + return MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; + if (!strcmp(mapping, "jeida-24")) + return MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + if (!strcmp(mapping, "vesa-24")) + return MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(drm_of_lvds_get_data_mapping); diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 62e8ccc7ab9c..042bb80383c9 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -140,6 +140,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO 2021 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Chuwi HiBook (CWI514) */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), @@ -205,6 +211,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), }, .driver_data = (void *)&gpd_win2, + }, { /* GPD Win 3 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03") + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* I.T.Works TW891 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), @@ -250,6 +262,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Yoga Book X90F / X91F / X91L */ + .matches = { + /* Non exact match to match all versions */ + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index deb23dbec8b5..c773d3dfb1ab 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -29,6 +29,7 @@ #include <linux/export.h> #include <linux/dma-buf.h> #include <linux/rbtree.h> +#include <linux/module.h> #include <drm/drm.h> #include <drm/drm_drv.h> @@ -39,6 +40,8 @@ #include "drm_internal.h" +MODULE_IMPORT_NS(DMA_BUF); + /** * DOC: overview and lifetime rules * @@ -719,11 +722,13 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) if (obj->funcs && obj->funcs->mmap) { vma->vm_ops = obj->funcs->vm_ops; + drm_gem_object_get(obj); ret = obj->funcs->mmap(obj, vma); - if (ret) + if (ret) { + drm_gem_object_put(obj); return ret; + } vma->vm_private_data = obj; - drm_gem_object_get(obj); return 0; } diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c new file mode 100644 index 000000000000..beaf99e9120a --- /dev/null +++ b/drivers/gpu/drm/drm_privacy_screen.c @@ 
-0,0 +1,467 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2020 - 2021 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <drm/drm_privacy_screen_machine.h> +#include <drm/drm_privacy_screen_consumer.h> +#include <drm/drm_privacy_screen_driver.h> +#include "drm_internal.h" + +/** + * DOC: overview + * + * This class allows non-KMS drivers, e.g. from drivers/platform/x86, to + * register a privacy-screen device, which the KMS drivers can then use + * to implement the standard privacy-screen properties, see + * :ref:`Standard Connector Properties<standard_connector_properties>`. + * + * KMS drivers using a privacy-screen class device are advised to use the + * drm_connector_attach_privacy_screen_provider() and + * drm_connector_update_privacy_screen() helpers for dealing with this. + */ + +#define to_drm_privacy_screen(dev) \ + container_of(dev, struct drm_privacy_screen, dev) + +static DEFINE_MUTEX(drm_privacy_screen_lookup_lock); +static LIST_HEAD(drm_privacy_screen_lookup_list); + +static DEFINE_MUTEX(drm_privacy_screen_devs_lock); +static LIST_HEAD(drm_privacy_screen_devs); + +/*** drm_privacy_screen_machine.h functions ***/ + +/** + * drm_privacy_screen_lookup_add - add an entry to the static privacy-screen + * lookup list + * @lookup: lookup list entry to add + * + * Add an entry to the static privacy-screen lookup list. Note the + * &struct list_head which is part of the &struct drm_privacy_screen_lookup + * gets added to a list owned by the privacy-screen core. So the passed-in + * &struct drm_privacy_screen_lookup must not be freed until it is removed + * from the lookup list by calling drm_privacy_screen_lookup_remove(). + */ +void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup) +{ + mutex_lock(&drm_privacy_screen_lookup_lock); + list_add(&lookup->list, &drm_privacy_screen_lookup_list); + mutex_unlock(&drm_privacy_screen_lookup_lock); +} +EXPORT_SYMBOL(drm_privacy_screen_lookup_add); + +/** + * drm_privacy_screen_lookup_remove - remove an entry from the static + * privacy-screen lookup list + * @lookup: lookup list entry to remove + * + * Remove an entry previously added with drm_privacy_screen_lookup_add() + * from the static privacy-screen lookup list. + */ +void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup) +{ + mutex_lock(&drm_privacy_screen_lookup_lock); + list_del(&lookup->list); + mutex_unlock(&drm_privacy_screen_lookup_lock); +} +EXPORT_SYMBOL(drm_privacy_screen_lookup_remove); + +/*** drm_privacy_screen_consumer.h functions ***/ + +static struct drm_privacy_screen *drm_privacy_screen_get_by_name( + const char *name) +{ + struct drm_privacy_screen *priv; + struct device *dev = NULL; + + mutex_lock(&drm_privacy_screen_devs_lock); + + list_for_each_entry(priv, &drm_privacy_screen_devs, list) { + if (strcmp(dev_name(&priv->dev), name) == 0) { + dev = get_device(&priv->dev); + break; + } + } + + mutex_unlock(&drm_privacy_screen_devs_lock); + + return dev ?
to_drm_privacy_screen(dev) : NULL; +} + +/** + * drm_privacy_screen_get - get a privacy-screen provider + * @dev: consumer-device for which to get a privacy-screen provider + * @con_id: (video)connector name for which to get a privacy-screen provider + * + * Get a privacy-screen provider for a privacy-screen attached to the + * display described by the @dev and @con_id parameters. + * + * Return: + * * A pointer to a &struct drm_privacy_screen on success. + * * ERR_PTR(-ENODEV) if no matching privacy-screen is found + * * ERR_PTR(-EPROBE_DEFER) if there is a matching privacy-screen, + * but it has not been registered yet. + */ +struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev, + const char *con_id) +{ + const char *dev_id = dev ? dev_name(dev) : NULL; + struct drm_privacy_screen_lookup *l; + struct drm_privacy_screen *priv; + const char *provider = NULL; + int match, best = -1; + + /* + * For now we only support using a static lookup table, which is + * populated by the drm_privacy_screen_arch_init() call. This should + * be extended with device-tree / fw_node lookup when support is added + * for device-tree using hardware with a privacy-screen. + * + * The lookup algorithm was shamelessly taken from the clock + * framework: + * + * We do slightly fuzzy matching here: + * An entry with a NULL ID is assumed to be a wildcard. + * If an entry has a device ID, it must match + * If an entry has a connection ID, it must match + * Then we take the most specific entry - with the following order + * of precedence: dev+con > dev only > con only. + */ + mutex_lock(&drm_privacy_screen_lookup_lock); + + list_for_each_entry(l, &drm_privacy_screen_lookup_list, list) { + match = 0; + + if (l->dev_id) { + if (!dev_id || strcmp(l->dev_id, dev_id)) + continue; + + match += 2; + } + + if (l->con_id) { + if (!con_id || strcmp(l->con_id, con_id)) + continue; + + match += 1; + } + + if (match > best) { + provider = l->provider; + best = match; + } + } + + mutex_unlock(&drm_privacy_screen_lookup_lock); + + if (!provider) + return ERR_PTR(-ENODEV); + + priv = drm_privacy_screen_get_by_name(provider); + if (!priv) + return ERR_PTR(-EPROBE_DEFER); + + return priv; +} +EXPORT_SYMBOL(drm_privacy_screen_get); + +/** + * drm_privacy_screen_put - release a privacy-screen reference + * @priv: privacy screen reference to release + * + * Release a privacy-screen provider reference gotten through + * drm_privacy_screen_get(). May be called with a NULL or ERR_PTR, + * in which case it is a no-op. + */ +void drm_privacy_screen_put(struct drm_privacy_screen *priv) +{ + if (IS_ERR_OR_NULL(priv)) + return; + + put_device(&priv->dev); +} +EXPORT_SYMBOL(drm_privacy_screen_put); + +/** + * drm_privacy_screen_set_sw_state - set a privacy-screen's sw-state + * @priv: privacy screen to set the sw-state for + * @sw_state: new sw-state value to set + * + * Set the sw-state of a privacy screen. If the privacy-screen is not + * in a locked hw-state, then the actual and hw-state of the privacy-screen + * will be immediately updated to the new value. If the privacy-screen is + * in a locked hw-state, then the new sw-state will be remembered as the + * requested state to put the privacy-screen in when it becomes unlocked. + * + * Return: 0 on success, negative error code on failure. 
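+ *
+ * A minimal consumer sketch (hypothetical KMS-driver code, error handling
+ * elided)::
+ *
+ *     priv = drm_privacy_screen_get(connector->dev->dev, NULL);
+ *     if (!IS_ERR(priv)) {
+ *             drm_privacy_screen_set_sw_state(priv, PRIVACY_SCREEN_ENABLED);
+ *             drm_privacy_screen_put(priv);
+ *     }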
+ */ +int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status sw_state) +{ + int ret = 0; + + mutex_lock(&priv->lock); + + if (!priv->ops) { + ret = -ENODEV; + goto out; + } + + /* + * As per the DRM connector properties documentation, setting the + * sw_state while the hw_state is locked is allowed. In this case + * it is a no-op other than storing the new sw_state so that it + * can be honored when the state gets unlocked. + * Also skip the set if the hw already is in the desired state. + */ + if (priv->hw_state >= PRIVACY_SCREEN_DISABLED_LOCKED || + priv->hw_state == sw_state) { + priv->sw_state = sw_state; + goto out; + } + + ret = priv->ops->set_sw_state(priv, sw_state); +out: + mutex_unlock(&priv->lock); + return ret; +} +EXPORT_SYMBOL(drm_privacy_screen_set_sw_state); + +/** + * drm_privacy_screen_get_state - get privacy-screen's current state + * @priv: privacy screen to get the state for + * @sw_state_ret: address where to store the privacy-screen's current sw-state + * @hw_state_ret: address where to store the privacy-screen's current hw-state + * + * Get the current state of a privacy-screen, both the sw-state and the + * hw-state. + */ +void drm_privacy_screen_get_state(struct drm_privacy_screen *priv, + enum drm_privacy_screen_status *sw_state_ret, + enum drm_privacy_screen_status *hw_state_ret) +{ + mutex_lock(&priv->lock); + *sw_state_ret = priv->sw_state; + *hw_state_ret = priv->hw_state; + mutex_unlock(&priv->lock); +} +EXPORT_SYMBOL(drm_privacy_screen_get_state); + +/** + * drm_privacy_screen_register_notifier - register a notifier + * @priv: Privacy screen to register the notifier with + * @nb: Notifier-block for the notifier to register + * + * Register a notifier with the privacy-screen to be notified of changes made + * to the privacy-screen state from outside of the privacy-screen class. + * E.g. the state may be changed by the hardware itself in response to a + * hotkey press. + * + * The notifier is called with no locks held. The new hw_state and sw_state + * can be retrieved using the drm_privacy_screen_get_state() function. + * A pointer to the drm_privacy_screen's struct is passed as the void *data + * argument of the notifier_block's notifier_call. + * + * The notifier will NOT be called when changes are made through + * drm_privacy_screen_set_sw_state(). It is only called for external changes. + * + * Return: 0 on success, negative error code on failure. + */ +int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&priv->notifier_head, nb); +} +EXPORT_SYMBOL(drm_privacy_screen_register_notifier); + +/** + * drm_privacy_screen_unregister_notifier - unregister a notifier + * @priv: Privacy screen to unregister the notifier from + * @nb: Notifier-block for the notifier to unregister + * + * Unregister a notifier registered with drm_privacy_screen_register_notifier(). + * + * Return: 0 on success, negative error code on failure.
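+ *
+ * Typical pairing in a consumer (sketch; foo and its notifier_block member
+ * nb are hypothetical)::
+ *
+ *     ret = drm_privacy_screen_register_notifier(priv, &foo->nb);
+ *     ...
+ *     drm_privacy_screen_unregister_notifier(priv, &foo->nb);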
+ */ +int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv, + struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&priv->notifier_head, nb); +} +EXPORT_SYMBOL(drm_privacy_screen_unregister_notifier); + +/*** drm_privacy_screen_driver.h functions ***/ + +static ssize_t sw_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_privacy_screen *priv = to_drm_privacy_screen(dev); + const char * const sw_state_names[] = { + "Disabled", + "Enabled", + }; + ssize_t ret; + + mutex_lock(&priv->lock); + + if (!priv->ops) + ret = -ENODEV; + else if (WARN_ON(priv->sw_state >= ARRAY_SIZE(sw_state_names))) + ret = -ENXIO; + else + ret = sprintf(buf, "%s\n", sw_state_names[priv->sw_state]); + + mutex_unlock(&priv->lock); + return ret; +} +/* + * RO: Do not allow setting the sw_state through sysfs, this MUST be done + * through the drm_properties on the drm_connector. + */ +static DEVICE_ATTR_RO(sw_state); + +static ssize_t hw_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct drm_privacy_screen *priv = to_drm_privacy_screen(dev); + const char * const hw_state_names[] = { + "Disabled", + "Enabled", + "Disabled, locked", + "Enabled, locked", + }; + ssize_t ret; + + mutex_lock(&priv->lock); + + if (!priv->ops) + ret = -ENODEV; + else if (WARN_ON(priv->hw_state >= ARRAY_SIZE(hw_state_names))) + ret = -ENXIO; + else + ret = sprintf(buf, "%s\n", hw_state_names[priv->hw_state]); + + mutex_unlock(&priv->lock); + return ret; +} +static DEVICE_ATTR_RO(hw_state); + +static struct attribute *drm_privacy_screen_attrs[] = { + &dev_attr_sw_state.attr, + &dev_attr_hw_state.attr, + NULL +}; +ATTRIBUTE_GROUPS(drm_privacy_screen); + +static struct device_type drm_privacy_screen_type = { + .name = "privacy_screen", + .groups = drm_privacy_screen_groups, +}; + +static void drm_privacy_screen_device_release(struct device *dev) +{ + struct drm_privacy_screen *priv = to_drm_privacy_screen(dev); + + kfree(priv); +} + +/** + * drm_privacy_screen_register - register a privacy-screen + * @parent: parent-device for the privacy-screen + * @ops: &struct drm_privacy_screen_ops pointer with ops for the privacy-screen + * + * Create and register a privacy-screen. + * + * Return: + * * A pointer to the created privacy-screen on success. + * * An ERR_PTR(errno) on failure. + */ +struct drm_privacy_screen *drm_privacy_screen_register( + struct device *parent, const struct drm_privacy_screen_ops *ops) +{ + struct drm_privacy_screen *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + mutex_init(&priv->lock); + BLOCKING_INIT_NOTIFIER_HEAD(&priv->notifier_head); + + priv->dev.class = drm_class; + priv->dev.type = &drm_privacy_screen_type; + priv->dev.parent = parent; + priv->dev.release = drm_privacy_screen_device_release; + dev_set_name(&priv->dev, "privacy_screen-%s", dev_name(parent)); + priv->ops = ops; + + priv->ops->get_hw_state(priv); + + ret = device_register(&priv->dev); + if (ret) { + put_device(&priv->dev); + return ERR_PTR(ret); + } + + mutex_lock(&drm_privacy_screen_devs_lock); + list_add(&priv->list, &drm_privacy_screen_devs); + mutex_unlock(&drm_privacy_screen_devs_lock); + + return priv; +} +EXPORT_SYMBOL(drm_privacy_screen_register); + +/** + * drm_privacy_screen_unregister - unregister privacy-screen + * @priv: privacy-screen to unregister + * + * Unregister a privacy-screen registered with drm_privacy_screen_register(). 
+ * May be called with a NULL or ERR_PTR, in which case it is a no-op. + */ +void drm_privacy_screen_unregister(struct drm_privacy_screen *priv) +{ + if (IS_ERR_OR_NULL(priv)) + return; + + mutex_lock(&drm_privacy_screen_devs_lock); + list_del(&priv->list); + mutex_unlock(&drm_privacy_screen_devs_lock); + + mutex_lock(&priv->lock); + priv->ops = NULL; + mutex_unlock(&priv->lock); + + device_unregister(&priv->dev); +} +EXPORT_SYMBOL(drm_privacy_screen_unregister); + +/** + * drm_privacy_screen_call_notifier_chain - notify consumers of state change + * @priv: Privacy screen whose state has changed + * + * A privacy-screen provider driver can call this function upon external + * changes to the privacy-screen state. E.g. the state may be changed by the + * hardware itself in response to a hotkey press. + * This function must be called without holding the privacy-screen lock. + * The driver must update sw_state and hw_state to reflect the new state before + * calling this function. + * The expected behavior from the driver upon receiving an external state + * change event is: 1. Take the lock; 2. Update sw_state and hw_state; + * 3. Release the lock; 4. Call drm_privacy_screen_call_notifier_chain(). + */ +void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv) +{ + blocking_notifier_call_chain(&priv->notifier_head, 0, priv); +} +EXPORT_SYMBOL(drm_privacy_screen_call_notifier_chain); diff --git a/drivers/gpu/drm/drm_privacy_screen_x86.c b/drivers/gpu/drm/drm_privacy_screen_x86.c new file mode 100644 index 000000000000..a2cafb294ca6 --- /dev/null +++ b/drivers/gpu/drm/drm_privacy_screen_x86.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2020 Red Hat, Inc. + * + * Authors: + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <linux/acpi.h> +#include <drm/drm_privacy_screen_machine.h> + +#ifdef CONFIG_X86 +static struct drm_privacy_screen_lookup arch_lookup; + +struct arch_init_data { + struct drm_privacy_screen_lookup lookup; + bool (*detect)(void); +}; + +#if IS_ENABLED(CONFIG_THINKPAD_ACPI) +static acpi_status __init acpi_set_handle(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + *(acpi_handle *)return_value = handle; + return AE_CTRL_TERMINATE; +} + +static bool __init detect_thinkpad_privacy_screen(void) +{ + union acpi_object obj = { .type = ACPI_TYPE_INTEGER }; + struct acpi_object_list args = { .count = 1, .pointer = &obj, }; + acpi_handle ec_handle = NULL; + unsigned long long output; + acpi_status status; + + /* Get embedded-controller handle */ + status = acpi_get_devices("PNP0C09", acpi_set_handle, NULL, &ec_handle); + if (ACPI_FAILURE(status) || !ec_handle) + return false; + + /* And call the privacy-screen get-status method */ + status = acpi_evaluate_integer(ec_handle, "HKEY.GSSS", &args, &output); + if (ACPI_FAILURE(status)) + return false; + + return (output & 0x10000) ?
true : false; +} +#endif + +static const struct arch_init_data arch_init_data[] __initconst = { +#if IS_ENABLED(CONFIG_THINKPAD_ACPI) + { + .lookup = { + .dev_id = NULL, + .con_id = NULL, + .provider = "privacy_screen-thinkpad_acpi", + }, + .detect = detect_thinkpad_privacy_screen, + }, +#endif +}; + +void __init drm_privacy_screen_lookup_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(arch_init_data); i++) { + if (!arch_init_data[i].detect()) + continue; + + pr_info("Found '%s' privacy-screen provider\n", + arch_init_data[i].lookup.provider); + + /* Make a copy because arch_init_data is __initconst */ + arch_lookup = arch_init_data[i].lookup; + drm_privacy_screen_lookup_add(&arch_lookup); + break; + } +} + +void drm_privacy_screen_lookup_exit(void) +{ + if (arch_lookup.provider) + drm_privacy_screen_lookup_remove(&arch_lookup); +} +#endif /* ifdef CONFIG_X86 */ diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 61d5c57f23e1..682359512996 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -604,6 +604,9 @@ EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); * * This function must be called from process context with no mode * setting locks held. + * + * If only a single connector has changed, consider calling + * drm_kms_helper_connector_hotplug_event() instead. */ void drm_kms_helper_hotplug_event(struct drm_device *dev) { @@ -616,6 +619,26 @@ void drm_kms_helper_hotplug_event(struct drm_device *dev) } EXPORT_SYMBOL(drm_kms_helper_hotplug_event); +/** + * drm_kms_helper_connector_hotplug_event - fire off a KMS connector hotplug event + * @connector: drm_connector which has changed + * + * This is the same as drm_kms_helper_hotplug_event(), except it fires a more + * fine-grained uevent for a single connector. 
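+ *
+ * A hypothetical HPD interrupt handler that already knows which connector
+ * changed might use it like this (sketch only; the changed-detection is
+ * driver specific)::
+ *
+ *     if (foo_connector_changed(connector))
+ *             drm_kms_helper_connector_hotplug_event(connector);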
+ */ +void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + /* send a uevent + call fbdev */ + drm_sysfs_connector_hotplug_event(connector); + if (dev->mode_config.funcs->output_poll_changed) + dev->mode_config.funcs->output_poll_changed(dev); + + drm_client_dev_hotplug(dev); +} +EXPORT_SYMBOL(drm_kms_helper_connector_hotplug_event); + static void output_poll_execute(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); @@ -865,7 +888,7 @@ bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector) mutex_unlock(&dev->mode_config.mutex); if (changed) { - drm_kms_helper_hotplug_event(dev); + drm_kms_helper_connector_hotplug_event(connector); drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n", connector->base.id, connector->name); @@ -904,9 +927,9 @@ EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event); */ bool drm_helper_hpd_irq_event(struct drm_device *dev) { - struct drm_connector *connector; + struct drm_connector *connector, *first_changed_connector = NULL; struct drm_connector_list_iter conn_iter; - bool changed = false; + int changed = 0; if (!dev->mode_config.poll_enabled) return false; @@ -918,16 +941,25 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev) if (!(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; - if (check_connector_changed(connector)) - changed = true; + if (check_connector_changed(connector)) { + if (!first_changed_connector) { + drm_connector_get(connector); + first_changed_connector = connector; + } + + changed++; + } } drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - if (changed) { + if (changed == 1) + drm_kms_helper_connector_hotplug_event(first_changed_connector); + else if (changed > 0) drm_kms_helper_hotplug_event(dev); - DRM_DEBUG_KMS("Sent hotplug event\n"); - } + + if (first_changed_connector) + drm_connector_put(first_changed_connector); return changed; } diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 6c353c9dc772..dfec479830e4 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -127,8 +127,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, property->num_values = num_values; INIT_LIST_HEAD(&property->enum_list); - strncpy(property->name, name, DRM_PROP_NAME_LEN); - property->name[DRM_PROP_NAME_LEN-1] = '\0'; + strscpy_pad(property->name, name, DRM_PROP_NAME_LEN); list_add_tail(&property->head, &dev->mode_config.property_list); @@ -421,8 +420,7 @@ int drm_property_add_enum(struct drm_property *property, if (!prop_enum) return -ENOMEM; - strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); - prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; + strscpy_pad(prop_enum->name, name, DRM_PROP_NAME_LEN); prop_enum->value = value; property->values[index] = value; @@ -475,8 +473,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, if (!property) return -ENOENT; - strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN); - out_resp->name[DRM_PROP_NAME_LEN-1] = 0; + strscpy_pad(out_resp->name, property->name, DRM_PROP_NAME_LEN); out_resp->flags = property->flags; value_count = property->num_values; diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index c9a9d74f338c..c313a5b4549c 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private, if (*fence) { ret = dma_fence_chain_find_seqno(fence, 
point); - if (!ret) + if (!ret) { + /* If the requested seqno is already signaled + * drm_syncobj_find_fence may return a NULL + * fence. To make sure the recipient gets + * signalled, use a new fence instead. + */ + if (!*fence) + *fence = dma_fence_get_stub(); + goto out; + } dma_fence_put(*fence); } else { ret = -EINVAL; diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 76ff6ec3421b..430e00b16eec 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -410,6 +410,31 @@ void drm_sysfs_hotplug_event(struct drm_device *dev) EXPORT_SYMBOL(drm_sysfs_hotplug_event); /** + * drm_sysfs_connector_hotplug_event - generate a DRM uevent for any connector + * change + * @connector: connector which has changed + * + * Send a uevent for the DRM connector specified by @connector. This will send + * a uevent with the properties HOTPLUG=1 and CONNECTOR. + */ +void drm_sysfs_connector_hotplug_event(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + char hotplug_str[] = "HOTPLUG=1", conn_id[21]; + char *envp[] = { hotplug_str, conn_id, NULL }; + + snprintf(conn_id, sizeof(conn_id), + "CONNECTOR=%u", connector->base.id); + + drm_dbg_kms(connector->dev, + "[CONNECTOR:%d:%s] generating connector hotplug event\n", + connector->base.id, connector->name); + + kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp); +} +EXPORT_SYMBOL(drm_sysfs_connector_hotplug_event); + +/** * drm_sysfs_connector_status_event - generate a DRM uevent for connector * property status change * @connector: connector on which property status changed diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 76d38561c910..cf741c5c82d2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, if (switch_mmu_context) { struct etnaviv_iommu_context *old_context = gpu->mmu_context; - etnaviv_iommu_context_get(mmu_context); - gpu->mmu_context = mmu_context; + gpu->mmu_context = etnaviv_iommu_context_get(mmu_context); etnaviv_iommu_context_put(old_context); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 8f1b5af47dd6..d5314aa28ff7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( list_del(&mapping->obj_node); } - etnaviv_iommu_context_get(mmu_context); - mapping->context = mmu_context; + mapping->context = etnaviv_iommu_context_get(mmu_context); mapping->use = 1; ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj, @@ -425,45 +424,24 @@ int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, } #ifdef CONFIG_DEBUG_FS -static void etnaviv_gem_describe_fence(struct dma_fence *fence, - const char *type, struct seq_file *m) -{ - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - seq_printf(m, "\t%9s: %s %s seq %llu\n", - type, - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - fence->seqno); -} - static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct dma_resv *robj = obj->resv; - struct dma_resv_list *fobj; - struct dma_fence *fence; unsigned long off = drm_vma_node_start(&obj->vma_node); + int r; seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n", etnaviv_obj->flags, 
is_active(etnaviv_obj) ? 'A' : 'I', obj->name, kref_read(&obj->refcount), off, etnaviv_obj->vaddr, obj->size); - rcu_read_lock(); - fobj = dma_resv_shared_list(robj); - if (fobj) { - unsigned int i, shared_count = fobj->shared_count; - - for (i = 0; i < shared_count; i++) { - fence = rcu_dereference(fobj->shared[i]); - etnaviv_gem_describe_fence(fence, "Shared", m); - } - } + r = dma_resv_lock(robj, NULL); + if (r) + return; - fence = dma_resv_excl_fence(robj); - if (fence) - etnaviv_gem_describe_fence(fence, "Exclusive", m); - rcu_read_unlock(); + dma_resv_describe(robj, m); + dma_resv_unlock(robj); } void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 6d8bed9c739d..6788ea8490d1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -5,10 +5,13 @@ #include <drm/drm_prime.h> #include <linux/dma-buf.h> +#include <linux/module.h> #include "etnaviv_drv.h" #include "etnaviv_gem.h" +MODULE_IMPORT_NS(DMA_BUF); + static struct lock_class_key etnaviv_prime_lock_class; struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 4dd7d9d541c0..b5e8ce86dbe7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -189,13 +189,13 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit) continue; if (bo->flags & ETNA_SUBMIT_BO_WRITE) { - ret = dma_resv_get_fences(robj, &bo->excl, + ret = dma_resv_get_fences(robj, NULL, &bo->nr_shared, &bo->shared); if (ret) return ret; } else { - bo->excl = dma_resv_get_excl_unlocked(robj); + bo->excl = dma_fence_get(dma_resv_excl_fence(robj)); } } @@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_objects; submit->ctx = file->driver_priv; - etnaviv_iommu_context_get(submit->ctx->mmu); - submit->mmu_context = submit->ctx->mmu; + submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu); submit->exec_state = args->exec_state; submit->flags = args->flags; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index c297fffe06eb..242a5fd8b932 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) /* We rely on the GPU running, so program the clock */ etnaviv_gpu_update_clock(gpu); + gpu->fe_running = false; + gpu->exec_state = -1; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = NULL; + return 0; } @@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch) VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE | VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch)); } + + gpu->fe_running = true; } -static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu) +static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu, + struct etnaviv_iommu_context *context) { - u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer, - &gpu->mmu_context->cmdbuf_mapping); u16 prefetch; + u32 address; /* setup the MMU */ - etnaviv_iommu_restore(gpu, gpu->mmu_context); + etnaviv_iommu_restore(gpu, context); /* Start command processor */ prefetch = etnaviv_buffer_init(gpu); + address = etnaviv_cmdbuf_get_va(&gpu->buffer, + 
&gpu->mmu_context->cmdbuf_mapping); etnaviv_gpu_start_fe(gpu, address, prefetch); } @@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) /* Now program the hardware */ mutex_lock(&gpu->lock); etnaviv_gpu_hw_init(gpu); - gpu->exec_state = -1; mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); @@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) spin_unlock(&gpu->event_spinlock); etnaviv_gpu_hw_init(gpu); - gpu->exec_state = -1; - gpu->mmu_context = NULL; mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); @@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) goto out_unlock; } - if (!gpu->mmu_context) { - etnaviv_iommu_context_get(submit->mmu_context); - gpu->mmu_context = submit->mmu_context; - etnaviv_gpu_start_fe_idleloop(gpu); - } else { - etnaviv_iommu_context_get(gpu->mmu_context); - submit->prev_mmu_context = gpu->mmu_context; - } + if (!gpu->fe_running) + etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context); + + if (submit->prev_mmu_context) + etnaviv_iommu_context_put(submit->prev_mmu_context); + submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context); if (submit->nr_pmrs) { gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre; @@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms) static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) { - if (gpu->initialized && gpu->mmu_context) { + if (gpu->initialized && gpu->fe_running) { /* Replace the last WAIT with END */ mutex_lock(&gpu->lock); etnaviv_buffer_end(gpu); @@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) */ etnaviv_gpu_wait_idle(gpu, 100); - etnaviv_iommu_context_put(gpu->mmu_context); - gpu->mmu_context = NULL; + gpu->fe_running = false; } gpu->exec_state = -1; @@ -1729,7 +1733,6 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, DBG("%s", dev_name(gpu->dev)); - flush_workqueue(gpu->wq); destroy_workqueue(gpu->wq); etnaviv_sched_fini(gpu); @@ -1741,6 +1744,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, etnaviv_gpu_hw_suspend(gpu); #endif + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + if (gpu->initialized) { etnaviv_cmdbuf_free(&gpu->buffer); etnaviv_iommu_global_fini(gpu); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 8ea48697d132..1c75c8ed5bce 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -101,6 +101,7 @@ struct etnaviv_gpu { struct workqueue_struct *wq; struct drm_gpu_scheduler sched; bool initialized; + bool fe_running; /* 'ring'-buffer: */ struct etnaviv_cmdbuf buffer; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c index 1a7c89a67bea..afe5dd6a9925 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c @@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu, struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); u32 pgtable; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + /* set base addresses */ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base); gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 
b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index f8bf488e9d71..d664ae29ae20 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE) return; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + prefetch = etnaviv_buffer_config_mmuv2(gpu, (u32)v2_context->mtlb_dma, (u32)context->global->bad_page_dma); @@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu, if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE) return; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW, lower_32_bits(context->global->v2.pta_dma)); gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index dab1b58006d8..9fb1a2aadbcb 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, */ list_for_each_entry_safe(m, n, &list, scan_node) { etnaviv_iommu_remove_mapping(context, m); + etnaviv_iommu_context_put(m->context); m->context = NULL; list_del_init(&m->mmu_node); list_del_init(&m->scan_node); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index d1d6902fd13b..e4a0b7d09c2e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf); struct etnaviv_iommu_context * etnaviv_iommu_context_init(struct etnaviv_iommu_global *global, struct etnaviv_cmdbuf_suballoc *suballoc); -static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) +static inline struct etnaviv_iommu_context * +etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) { kref_get(&ctx->refcount); + return ctx; } void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx); void etnaviv_iommu_restore(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 9870c4e6af36..b5001db7a95c 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct decon_context *ctx; - struct resource *res; int ret; int i; @@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) ctx->clks[i] = clk; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->addr = devm_ioremap_resource(dev, res); + ctx->addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->addr)) return PTR_ERR(ctx->addr); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index e39fac889edc..8d137857818c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = { static int exynos_dsi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct exynos_dsi *dsi; int 
ret, i; @@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev) } } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dsi->reg_base = devm_ioremap_resource(dev, res); + dsi->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->reg_base)) return PTR_ERR(dsi->reg_base); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index a3c718148c45..ecfd82d0afb7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -85,7 +85,6 @@ struct fimc_scaler { /* * A structure of fimc context. * - * @regs_res: register resources. * @regs: memory mapped io registers. * @lock: locking of operations. * @clocks: fimc clocks. @@ -103,7 +102,6 @@ struct fimc_context { struct exynos_drm_ipp_formats *formats; unsigned int num_formats; - struct resource *regs_res; void __iomem *regs; spinlock_t lock; struct clk *clocks[FIMC_CLKS_MAX]; @@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev) ctx->num_formats = num_formats; /* resource memory */ - ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); + ctx->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 700ca4fa6665..c735e53939d8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev) return PTR_ERR(ctx->lcd_clk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - ctx->regs = devm_ioremap_resource(dev, res); + ctx->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index b00230626c6a..471fd6c8135f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = { static int g2d_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct g2d_data *g2d; int ret; @@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev) clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - g2d->regs = devm_ioremap_resource(dev, res); + g2d->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(g2d->regs)) { ret = PTR_ERR(g2d->regs); goto err_put_clk; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 4396224227d1..0a0c042a3155 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -9,6 +9,7 @@ #include <linux/dma-buf.h> #include <linux/pfn_t.h> #include <linux/shmem_fs.h> +#include <linux/module.h> #include <drm/drm_prime.h> #include <drm/drm_vma_manager.h> @@ -17,6 +18,8 @@ #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" +MODULE_IMPORT_NS(DMA_BUF); + static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap) { struct drm_device *dev = exynos_gem->base.dev; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 90d7bf906885..166a80262896 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ 
b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -86,7 +86,6 @@ struct gsc_scaler { /* * A structure of gsc context. * - * @regs_res: register resources. * @regs: memory mapped io registers. * @gsc_clk: gsc gate clock. * @sc: scaler infomations. @@ -103,7 +102,6 @@ struct gsc_context { struct exynos_drm_ipp_formats *formats; unsigned int num_formats; - struct resource *regs_res; void __iomem *regs; const char **clk_names; struct clk *clocks[GSC_MAX_CLOCKS]; @@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev) } } - /* resource memory */ - ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); + ctx->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->regs)) return PTR_ERR(ctx->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index ee61be4cf152..dec7df35baa9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = { static int rotator_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *regs_res; struct rot_context *rot; const struct rot_variant *variant; int irq; @@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev) rot->formats = variant->formats; rot->num_formats = variant->num_formats; rot->dev = dev; - regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rot->regs = devm_ioremap_resource(dev, regs_res); + rot->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rot->regs)) return PTR_ERR(rot->regs); diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index f9ae5b038d59..3a7851b7dc66 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = { static int scaler_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *regs_res; struct scaler_context *scaler; int irq; int ret, i; @@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev) (struct scaler_data *)of_device_get_match_data(dev); scaler->dev = dev; - regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - scaler->regs = devm_ioremap_resource(dev, regs_res); + scaler->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(scaler->regs)) return PTR_ERR(scaler->regs); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index c769dec576de..7655142a4651 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev) struct hdmi_audio_infoframe *audio_infoframe; struct device *dev = &pdev->dev; struct hdmi_context *hdata; - struct resource *res; int ret; hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); @@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev) return ret; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hdata->regs = devm_ioremap_resource(dev, res); + hdata->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdata->regs)) { ret = PTR_ERR(hdata->regs); return ret; diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig index d7dd8ba90e3a..e95e96c565ba 100644 --- a/drivers/gpu/drm/fsl-dcu/Kconfig +++ b/drivers/gpu/drm/fsl-dcu/Kconfig 
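The exynos conversions above all follow one pattern: the two-step platform_get_resource() plus devm_ioremap_resource() sequence collapses into a single devm_platform_ioremap_resource() call, which also lets each probe function drop its local struct resource pointer. As a generic sketch (foo_probe is a placeholder):

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* Look up IORESOURCE_MEM index 0 and ioremap it in one step. */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	return 0;
}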
@@ -3,8 +3,8 @@ config DRM_FSL_DCU tristate "DRM Support for Freescale DCU" depends on DRM && OF && ARM && COMMON_CLK select BACKLIGHT_CLASS_DEVICE + select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 321e416489a9..45df9de22007 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -25,7 +25,6 @@ #include "framebuffer.h" #include "gem.h" -#include "gtt.h" #include "psb_drv.h" #include "psb_intel_drv.h" #include "psb_intel_reg.h" @@ -82,14 +81,13 @@ static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf) struct drm_framebuffer *fb = vma->vm_private_data; struct drm_device *dev = fb->dev; struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct gtt_range *gtt = to_gtt_range(fb->obj[0]); + struct psb_gem_object *pobj = to_psb_gem_object(fb->obj[0]); int page_num; int i; unsigned long address; vm_fault_t ret = VM_FAULT_SIGBUS; unsigned long pfn; - unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + - gtt->offset; + unsigned long phys_addr = (unsigned long)dev_priv->stolen_base + pobj->offset; page_num = vma_pages(vma); address = vmf->address - (vmf->pgoff << PAGE_SHIFT); @@ -226,31 +224,6 @@ static struct drm_framebuffer *psb_framebuffer_create } /** - * psbfb_alloc - allocate frame buffer memory - * @dev: the DRM device - * @aligned_size: space needed - * - * Allocate the frame buffer. In the usual case we get a GTT range that - * is stolen memory backed and life is simple. If there isn't sufficient - * we fail as we don't have the virtual mapping space to really vmap it - * and the kernel console code can't handle non linear framebuffers. - * - * Re-address this as and if the framebuffer layer grows this ability. 
- */ -static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size) -{ - struct gtt_range *backing; - /* Begin by trying to use stolen memory backing */ - backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE); - if (backing) { - backing->gem.funcs = &psb_gem_object_funcs; - drm_gem_private_object_init(dev, &backing->gem, aligned_size); - return backing; - } - return NULL; -} - -/** * psbfb_create - create a framebuffer * @fb_helper: the framebuffer helper * @sizes: specification of the layout @@ -268,7 +241,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, struct drm_mode_fb_cmd2 mode_cmd; int size; int ret; - struct gtt_range *backing; + struct psb_gem_object *backing; + struct drm_gem_object *obj; u32 bpp, depth; mode_cmd.width = sizes->surface_width; @@ -286,24 +260,25 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, size = ALIGN(size, PAGE_SIZE); /* Allocate the framebuffer in the GTT with stolen page backing */ - backing = psbfb_alloc(dev, size); - if (backing == NULL) - return -ENOMEM; + backing = psb_gem_create(dev, size, "fb", true, PAGE_SIZE); + if (IS_ERR(backing)) + return PTR_ERR(backing); + obj = &backing->base; memset(dev_priv->vram_addr + backing->offset, 0, size); info = drm_fb_helper_alloc_fbi(fb_helper); if (IS_ERR(info)) { ret = PTR_ERR(info); - goto out; + goto err_drm_gem_object_put; } mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); - fb = psb_framebuffer_create(dev, &mode_cmd, &backing->gem); + fb = psb_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) { ret = PTR_ERR(fb); - goto out; + goto err_drm_gem_object_put; } fb_helper->fb = fb; @@ -334,8 +309,9 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, dev_dbg(dev->dev, "allocated %dx%d fb\n", fb->width, fb->height); return 0; -out: - psb_gtt_free_range(dev, backing); + +err_drm_gem_object_put: + drm_gem_object_put(obj); return ret; } diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index 5ae54c9d2819..8d65af80bb08 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -13,24 +13,105 @@ #include <linux/pagemap.h> +#include <asm/set_memory.h> + #include <drm/drm.h> #include <drm/drm_vma_manager.h> #include "gem.h" #include "psb_drv.h" +int psb_gem_pin(struct psb_gem_object *pobj) +{ + struct drm_gem_object *obj = &pobj->base; + struct drm_device *dev = obj->dev; + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + u32 gpu_base = dev_priv->gtt.gatt_start; + struct page **pages; + unsigned int npages; + int ret; + + mutex_lock(&dev_priv->gtt_mutex); + + if (pobj->in_gart || pobj->stolen) + goto out; /* already mapped */ + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto err_mutex_unlock; + } + + npages = obj->size / PAGE_SIZE; + + set_pages_array_wc(pages, npages); + + psb_gtt_insert_pages(dev_priv, &pobj->resource, pages); + psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages, + (gpu_base + pobj->offset), npages, 0, 0, + PSB_MMU_CACHED_MEMORY); + + pobj->npage = npages; + pobj->pages = pages; + +out: + ++pobj->in_gart; + mutex_unlock(&dev_priv->gtt_mutex); + + return 0; + +err_mutex_unlock: + mutex_unlock(&dev_priv->gtt_mutex); + return ret; +} + +void psb_gem_unpin(struct psb_gem_object *pobj) +{ + struct drm_gem_object *obj = &pobj->base; + struct drm_device *dev = obj->dev; + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + u32 gpu_base = dev_priv->gtt.gatt_start; + + mutex_lock(&dev_priv->gtt_mutex); + + 
WARN_ON(!pobj->in_gart); + + --pobj->in_gart; + + if (pobj->in_gart || pobj->stolen) + goto out; + + psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), + (gpu_base + pobj->offset), pobj->npage, 0, 0); + psb_gtt_remove_pages(dev_priv, &pobj->resource); + + /* Reset caching flags */ + set_pages_array_wb(pobj->pages, pobj->npage); + + drm_gem_put_pages(obj, pobj->pages, true, false); + pobj->pages = NULL; + pobj->npage = 0; + +out: + mutex_unlock(&dev_priv->gtt_mutex); +} + static vm_fault_t psb_gem_fault(struct vm_fault *vmf); static void psb_gem_free_object(struct drm_gem_object *obj) { - struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); + struct psb_gem_object *pobj = to_psb_gem_object(obj); - /* Remove the list map if one is present */ - drm_gem_free_mmap_offset(obj); drm_gem_object_release(obj); - /* This must occur last as it frees up the memory of the GEM object */ - psb_gtt_free_range(obj->dev, gtt); + /* Undo the mmap pin if we are destroying the object */ + if (pobj->mmapping) + psb_gem_unpin(pobj); + + WARN_ON(pobj->in_gart && !pobj->stolen); + + release_resource(&pobj->resource); + kfree(pobj); } static const struct vm_operations_struct psb_gem_vm_ops = { @@ -39,63 +120,60 @@ static const struct vm_operations_struct psb_gem_vm_ops = { .close = drm_gem_vm_close, }; -const struct drm_gem_object_funcs psb_gem_object_funcs = { +static const struct drm_gem_object_funcs psb_gem_object_funcs = { .free = psb_gem_free_object, .vm_ops = &psb_gem_vm_ops, }; -/** - * psb_gem_create - create a mappable object - * @file: the DRM file of the client - * @dev: our device - * @size: the size requested - * @handlep: returned handle (opaque number) - * @stolen: unused - * @align: unused - * - * Create a GEM object, fill in the boilerplate and attach a handle to - * it so that userspace can speak about it. 
This does the core work - * for the various methods that do/will create GEM objects for things - */ -int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, - u32 *handlep, int stolen, u32 align) +struct psb_gem_object * +psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align) { - struct gtt_range *r; + struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + struct psb_gem_object *pobj; + struct drm_gem_object *obj; int ret; - u32 handle; size = roundup(size, PAGE_SIZE); - /* Allocate our object - for now a direct gtt range which is not - stolen memory backed */ - r = psb_gtt_alloc_range(dev, size, "gem", 0, PAGE_SIZE); - if (r == NULL) { - dev_err(dev->dev, "no memory for %lld byte GEM object\n", size); - return -ENOSPC; - } - r->gem.funcs = &psb_gem_object_funcs; - /* Initialize the extra goodies GEM needs to do all the hard work */ - if (drm_gem_object_init(dev, &r->gem, size) != 0) { - psb_gtt_free_range(dev, r); - /* GEM doesn't give an error code so use -ENOMEM */ - dev_err(dev->dev, "GEM init failed for %lld\n", size); - return -ENOMEM; + pobj = kzalloc(sizeof(*pobj), GFP_KERNEL); + if (!pobj) + return ERR_PTR(-ENOMEM); + obj = &pobj->base; + + /* GTT resource */ + + ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen, + &pobj->offset); + if (ret) + goto err_kfree; + + if (stolen) { + pobj->stolen = true; + pobj->in_gart = 1; } - /* Limit the object to 32bit mappings */ - mapping_set_gfp_mask(r->gem.filp->f_mapping, GFP_KERNEL | __GFP_DMA32); - /* Give the object a handle so we can carry it more easily */ - ret = drm_gem_handle_create(file, &r->gem, &handle); - if (ret) { - dev_err(dev->dev, "GEM handle failed for %p, %lld\n", - &r->gem, size); - drm_gem_object_release(&r->gem); - psb_gtt_free_range(dev, r); - return ret; + + /* GEM object */ + + obj->funcs = &psb_gem_object_funcs; + + if (stolen) { + drm_gem_private_object_init(dev, obj, size); + } else { + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto err_release_resource; + + /* Limit the object to 32-bit mappings */ + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32); } - /* We have the initial and handle reference but need only one now */ - drm_gem_object_put(&r->gem); - *handlep = handle; - return 0; + + return pobj; + +err_release_resource: + release_resource(&pobj->resource); +err_kfree: + kfree(pobj); + return ERR_PTR(ret); } /** @@ -111,10 +189,40 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64); - args->size = args->pitch * args->height; - return psb_gem_create(file, dev, args->size, &args->handle, 0, - PAGE_SIZE); + size_t pitch, size; + struct psb_gem_object *pobj; + struct drm_gem_object *obj; + u32 handle; + int ret; + + pitch = args->width * DIV_ROUND_UP(args->bpp, 8); + pitch = ALIGN(pitch, 64); + + size = pitch * args->height; + size = roundup(size, PAGE_SIZE); + if (!size) + return -EINVAL; + + pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE); + if (IS_ERR(pobj)) + return PTR_ERR(pobj); + obj = &pobj->base; + + ret = drm_gem_handle_create(file, obj, &handle); + if (ret) + goto err_drm_gem_object_put; + + drm_gem_object_put(obj); + + args->pitch = pitch; + args->size = size; + args->handle = handle; + + return 0; + +err_drm_gem_object_put: + drm_gem_object_put(obj); + return ret; } 
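The dumb-buffer hunk above computes the scanout pitch and the allocation size before creating the GEM object, and its `!size` check rejects zero-sized requests (width, height or bpp of 0) before anything is allocated. A minimal user-space sketch of that arithmetic, with hypothetical 800x600 XRGB8888 values; ALIGN_UP stands in for the kernel's ALIGN()/roundup() helpers, and the 64-byte pitch alignment and page-size rounding mirror psb_gem_dumb_create() above:

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE	4096UL
	/* a must be a power of two, as 64 and PAGE_SIZE are */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int width = 800, height = 600, bpp = 32;

		/* bytes per scanline, rounding bpp up to whole bytes */
		size_t pitch = width * ((bpp + 7) / 8);	/* 800 * 4 = 3200 */
		pitch = ALIGN_UP(pitch, 64);		/* already aligned: 3200 */

		/* whole pages: 3200 * 600 = 1920000 -> 1921024 (469 pages) */
		size_t size = ALIGN_UP(pitch * height, PAGE_SIZE);

		printf("pitch=%zu size=%zu\n", pitch, size);
		return 0;
	}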
/** @@ -137,7 +245,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj; - struct gtt_range *r; + struct psb_gem_object *pobj; int err; vm_fault_t ret; unsigned long pfn; @@ -149,7 +257,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) dev = obj->dev; dev_priv = to_drm_psb_private(dev); - r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */ + pobj = to_psb_gem_object(obj); /* Make sure we don't parallel update on a fault, nor move or remove something from beneath our feet */ @@ -157,14 +265,14 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) /* For now the mmap pins the object and it stays pinned. As things stand that will do us no harm */ - if (r->mmapping == 0) { - err = psb_gtt_pin(r); + if (pobj->mmapping == 0) { + err = psb_gem_pin(pobj); if (err < 0) { dev_err(dev->dev, "gma500: pin failed: %d\n", err); ret = vmf_error(err); goto fail; } - r->mmapping = 1; + pobj->mmapping = 1; } /* Page relative to the VMA start - we must calculate this ourselves @@ -172,10 +280,10 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf) page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; /* CPU view of the page, don't go via the GART for CPU writes */ - if (r->stolen) - pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT; + if (pobj->stolen) + pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT; else - pfn = page_to_pfn(r->pages[page_offset]); + pfn = page_to_pfn(pobj->pages[page_offset]); ret = vmf_insert_pfn(vma, vmf->address, pfn); fail: mutex_unlock(&dev_priv->mmap_mutex); diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h index bae6454ead29..79cced40c87f 100644 --- a/drivers/gpu/drm/gma500/gem.h +++ b/drivers/gpu/drm/gma500/gem.h @@ -8,11 +8,33 @@ #ifndef _GEM_H #define _GEM_H +#include <linux/kernel.h> + +#include <drm/drm_gem.h> + struct drm_device; -extern const struct drm_gem_object_funcs psb_gem_object_funcs; +struct psb_gem_object { + struct drm_gem_object base; + + struct resource resource; /* GTT resource for our allocation */ + u32 offset; /* GTT offset of our object */ + int in_gart; /* Currently in the GART (ref ct) */ + bool stolen; /* Backed from stolen RAM */ + bool mmapping; /* Is mmappable */ + struct page **pages; /* Backing pages if present */ + int npage; /* Number of backing pages */ +}; + +static inline struct psb_gem_object *to_psb_gem_object(struct drm_gem_object *obj) +{ + return container_of(obj, struct psb_gem_object, base); +} + +struct psb_gem_object * +psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align); -extern int psb_gem_create(struct drm_file *file, struct drm_device *dev, - u64 size, u32 *handlep, int stolen, u32 align); +int psb_gem_pin(struct psb_gem_object *pobj); +void psb_gem_unpin(struct psb_gem_object *pobj); #endif diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index cbcecbaa041b..99da3118131a 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -15,6 +15,7 @@ #include <drm/drm_vblank.h> #include "framebuffer.h" +#include "gem.h" #include "gma_display.h" #include "psb_drv.h" #include "psb_intel_drv.h" @@ -54,7 +55,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct drm_framebuffer *fb = crtc->primary->fb; - struct gtt_range *gtt; + struct psb_gem_object *pobj; 
int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; @@ -70,14 +71,14 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, goto gma_pipe_cleaner; } - gtt = to_gtt_range(fb->obj[0]); + pobj = to_psb_gem_object(fb->obj[0]); /* We are displaying this buffer, make sure it is actually loaded into the GTT */ - ret = psb_gtt_pin(gtt); + ret = psb_gem_pin(pobj); if (ret < 0) goto gma_pipe_set_base_exit; - start = gtt->offset; + start = pobj->offset; offset = y * fb->pitches[0] + x * fb->format->cpp[0]; REG_WRITE(map->stride, fb->pitches[0]); @@ -125,7 +126,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, gma_pipe_cleaner: /* If there was a previous display we can now unpin it */ if (old_fb) - psb_gtt_unpin(to_gtt_range(old_fb->obj[0])); + psb_gem_unpin(to_psb_gem_object(old_fb->obj[0])); gma_pipe_set_base_exit: gma_power_end(dev); @@ -331,8 +332,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; uint32_t temp; size_t addr = 0; - struct gtt_range *gt; - struct gtt_range *cursor_gt = gma_crtc->cursor_gt; + struct psb_gem_object *pobj; + struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj; struct drm_gem_object *obj; void *tmp_dst, *tmp_src; int ret = 0, i, cursor_pages; @@ -348,9 +349,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, /* Unpin the old GEM object */ if (gma_crtc->cursor_obj) { - gt = container_of(gma_crtc->cursor_obj, - struct gtt_range, gem); - psb_gtt_unpin(gt); + pobj = to_psb_gem_object(gma_crtc->cursor_obj); + psb_gem_unpin(pobj); drm_gem_object_put(gma_crtc->cursor_obj); gma_crtc->cursor_obj = NULL; } @@ -375,40 +375,40 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, goto unref_cursor; } - gt = container_of(obj, struct gtt_range, gem); + pobj = to_psb_gem_object(obj); /* Pin the memory into the GTT */ - ret = psb_gtt_pin(gt); + ret = psb_gem_pin(pobj); if (ret) { dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); goto unref_cursor; } if (dev_priv->ops->cursor_needs_phys) { - if (cursor_gt == NULL) { + if (!cursor_pobj) { dev_err(dev->dev, "No hardware cursor mem available"); ret = -ENOMEM; goto unref_cursor; } /* Prevent overflow */ - if (gt->npage > 4) + if (pobj->npage > 4) cursor_pages = 4; else - cursor_pages = gt->npage; + cursor_pages = pobj->npage; /* Copy the cursor to cursor mem */ - tmp_dst = dev_priv->vram_addr + cursor_gt->offset; + tmp_dst = dev_priv->vram_addr + cursor_pobj->offset; for (i = 0; i < cursor_pages; i++) { - tmp_src = kmap(gt->pages[i]); + tmp_src = kmap(pobj->pages[i]); memcpy(tmp_dst, tmp_src, PAGE_SIZE); - kunmap(gt->pages[i]); + kunmap(pobj->pages[i]); tmp_dst += PAGE_SIZE; } addr = gma_crtc->cursor_addr; } else { - addr = gt->offset; + addr = pobj->offset; gma_crtc->cursor_addr = addr; } @@ -425,8 +425,8 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, /* unpin the old bo */ if (gma_crtc->cursor_obj) { - gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); - psb_gtt_unpin(gt); + pobj = to_psb_gem_object(gma_crtc->cursor_obj); + psb_gem_unpin(pobj); drm_gem_object_put(gma_crtc->cursor_obj); } @@ -483,14 +483,14 @@ void gma_crtc_commit(struct drm_crtc *crtc) void gma_crtc_disable(struct drm_crtc *crtc) { - struct gtt_range *gt; + struct psb_gem_object *pobj; const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->primary->fb) { - gt = to_gtt_range(crtc->primary->fb->obj[0]); - psb_gtt_unpin(gt); + pobj = 
to_psb_gem_object(crtc->primary->fb->obj[0]); + psb_gem_unpin(pobj); } } @@ -498,6 +498,9 @@ void gma_crtc_destroy(struct drm_crtc *crtc) { struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + if (gma_crtc->cursor_pobj) + drm_gem_object_put(&gma_crtc->cursor_pobj->base); + kfree(gma_crtc->crtc_state); drm_crtc_cleanup(crtc); kfree(gma_crtc); diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index 55a2a6919533..309ffe921bfd 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -7,10 +7,7 @@ * Alan Cox <alan@linux.intel.com> */ -#include <linux/shmem_fs.h> - -#include <asm/set_memory.h> - +#include "gem.h" /* TODO: for struct psb_gem_object, see psb_gtt_restore() */ #include "psb_drv.h" @@ -18,6 +15,33 @@ * GTT resource allocator - manage page mappings in GTT space */ +int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res, + const char *name, resource_size_t size, resource_size_t align, + bool stolen, u32 *offset) +{ + struct resource *root = pdev->gtt_mem; + resource_size_t start, end; + int ret; + + if (stolen) { + /* The start of the GTT is backed by stolen pages. */ + start = root->start; + end = root->start + pdev->gtt.stolen_size - 1; + } else { + /* The rest is backed by system pages. */ + start = root->start + pdev->gtt.stolen_size; + end = root->end; + } + + res->name = name; + ret = allocate_resource(root, res, size, start, end, align, NULL, NULL); + if (ret) + return ret; + *offset = res->start - root->start; + + return 0; +} + /** * psb_gtt_mask_pte - generate GTT pte entry * @pfn: page number to encode @@ -43,281 +67,62 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type) return (pfn << PAGE_SHIFT) | mask; } -/** - * psb_gtt_entry - find the GTT entries for a gtt_range - * @dev: our DRM device - * @r: our GTT range - * - * Given a gtt_range object return the GTT offset of the page table - * entries for this gtt_range - */ -static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r) +static u32 __iomem *psb_gtt_entry(struct drm_psb_private *pdev, const struct resource *res) { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - unsigned long offset; + unsigned long offset = res->start - pdev->gtt_mem->start; - offset = r->resource.start - dev_priv->gtt_mem->start; - - return dev_priv->gtt_map + (offset >> PAGE_SHIFT); + return pdev->gtt_map + (offset >> PAGE_SHIFT); } -/** - * psb_gtt_insert - put an object into the GTT - * @dev: our DRM device - * @r: our GTT range - * @resume: on resume - * - * Take our preallocated GTT range and insert the GEM object into - * the GTT. This is protected via the gtt mutex which the caller - * must hold. +/* + * Take our preallocated GTT range and insert the GEM object into + * the GTT. This is protected via the gtt mutex which the caller + * must hold. */ -static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r, - int resume) +void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res, + struct page **pages) { + resource_size_t npages, i; u32 __iomem *gtt_slot; u32 pte; - struct page **pages; - int i; - if (r->pages == NULL) { - WARN_ON(1); - return -EINVAL; - } - - WARN_ON(r->stolen); /* refcount these maybe ? 
*/ - - gtt_slot = psb_gtt_entry(dev, r); - pages = r->pages; + /* Write our page entries into the GTT itself */ - if (!resume) { - /* Make sure changes are visible to the GPU */ - set_pages_array_wc(pages, r->npage); - } + npages = resource_size(res) >> PAGE_SHIFT; + gtt_slot = psb_gtt_entry(pdev, res); - /* Write our page entries into the GTT itself */ - for (i = 0; i < r->npage; i++) { - pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), - PSB_MMU_CACHED_MEMORY); - iowrite32(pte, gtt_slot++); + for (i = 0; i < npages; ++i, ++gtt_slot) { + pte = psb_gtt_mask_pte(page_to_pfn(pages[i]), PSB_MMU_CACHED_MEMORY); + iowrite32(pte, gtt_slot); } /* Make sure all the entries are set before we return */ ioread32(gtt_slot - 1); - - return 0; } -/** - * psb_gtt_remove - remove an object from the GTT - * @dev: our DRM device - * @r: our GTT range - * - * Remove a preallocated GTT range from the GTT. Overwrite all the - * page table entries with the dummy page. This is protected via the gtt - * mutex which the caller must hold. +/* + * Remove a preallocated GTT range from the GTT. Overwrite all the + * page table entries with the dummy page. This is protected via the gtt + * mutex which the caller must hold. */ -static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r) +void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res) { - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); + resource_size_t npages, i; u32 __iomem *gtt_slot; u32 pte; - int i; - - WARN_ON(r->stolen); - - gtt_slot = psb_gtt_entry(dev, r); - pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), - PSB_MMU_CACHED_MEMORY); - - for (i = 0; i < r->npage; i++) - iowrite32(pte, gtt_slot++); - ioread32(gtt_slot - 1); - set_pages_array_wb(r->pages, r->npage); -} - -/** - * psb_gtt_attach_pages - attach and pin GEM pages - * @gt: the gtt range - * - * Pin and build an in kernel list of the pages that back our GEM object. - * While we hold this the pages cannot be swapped out. This is protected - * via the gtt mutex which the caller must hold. - */ -static int psb_gtt_attach_pages(struct gtt_range *gt) -{ - struct page **pages; - - WARN_ON(gt->pages); - - pages = drm_gem_get_pages(>->gem); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - gt->npage = gt->gem.size / PAGE_SIZE; - gt->pages = pages; - - return 0; -} - -/** - * psb_gtt_detach_pages - attach and pin GEM pages - * @gt: the gtt range - * - * Undo the effect of psb_gtt_attach_pages. At this point the pages - * must have been removed from the GTT as they could now be paged out - * and move bus address. This is protected via the gtt mutex which the - * caller must hold. - */ -static void psb_gtt_detach_pages(struct gtt_range *gt) -{ - drm_gem_put_pages(>->gem, gt->pages, true, false); - gt->pages = NULL; -} -/** - * psb_gtt_pin - pin pages into the GTT - * @gt: range to pin - * - * Pin a set of pages into the GTT. The pins are refcounted so that - * multiple pins need multiple unpins to undo. - * - * Non GEM backed objects treat this as a no-op as they are always GTT - * backed objects. 
- */ -int psb_gtt_pin(struct gtt_range *gt) -{ - int ret = 0; - struct drm_device *dev = gt->gem.dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - u32 gpu_base = dev_priv->gtt.gatt_start; + /* Install scratch page for the resource */ - mutex_lock(&dev_priv->gtt_mutex); + pte = psb_gtt_mask_pte(page_to_pfn(pdev->scratch_page), PSB_MMU_CACHED_MEMORY); - if (gt->in_gart == 0 && gt->stolen == 0) { - ret = psb_gtt_attach_pages(gt); - if (ret < 0) - goto out; - ret = psb_gtt_insert(dev, gt, 0); - if (ret < 0) { - psb_gtt_detach_pages(gt); - goto out; - } - psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), - gt->pages, (gpu_base + gt->offset), - gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY); - } - gt->in_gart++; -out: - mutex_unlock(&dev_priv->gtt_mutex); - return ret; -} + npages = resource_size(res) >> PAGE_SHIFT; + gtt_slot = psb_gtt_entry(pdev, res); -/** - * psb_gtt_unpin - Drop a GTT pin requirement - * @gt: range to pin - * - * Undoes the effect of psb_gtt_pin. On the last drop the GEM object - * will be removed from the GTT which will also drop the page references - * and allow the VM to clean up or page stuff. - * - * Non GEM backed objects treat this as a no-op as they are always GTT - * backed objects. - */ -void psb_gtt_unpin(struct gtt_range *gt) -{ - struct drm_device *dev = gt->gem.dev; - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - u32 gpu_base = dev_priv->gtt.gatt_start; + for (i = 0; i < npages; ++i, ++gtt_slot) + iowrite32(pte, gtt_slot); - mutex_lock(&dev_priv->gtt_mutex); - - WARN_ON(!gt->in_gart); - - gt->in_gart--; - if (gt->in_gart == 0 && gt->stolen == 0) { - psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu), - (gpu_base + gt->offset), gt->npage, 0, 0); - psb_gtt_remove(dev, gt); - psb_gtt_detach_pages(gt); - } - - mutex_unlock(&dev_priv->gtt_mutex); -} - -/* - * GTT resource allocator - allocate and manage GTT address space - */ - -/** - * psb_gtt_alloc_range - allocate GTT address space - * @dev: Our DRM device - * @len: length (bytes) of address space required - * @name: resource name - * @backed: resource should be backed by stolen pages - * @align: requested alignment - * - * Ask the kernel core to find us a suitable range of addresses - * to use for a GTT mapping. - * - * Returns a gtt_range structure describing the object, or NULL on - * error. On successful return the resource is both allocated and marked - * as in use. 
- */ -struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, - const char *name, int backed, u32 align) -{ - struct drm_psb_private *dev_priv = to_drm_psb_private(dev); - struct gtt_range *gt; - struct resource *r = dev_priv->gtt_mem; - int ret; - unsigned long start, end; - - if (backed) { - /* The start of the GTT is the stolen pages */ - start = r->start; - end = r->start + dev_priv->gtt.stolen_size - 1; - } else { - /* The rest we will use for GEM backed objects */ - start = r->start + dev_priv->gtt.stolen_size; - end = r->end; - } - - gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL); - if (gt == NULL) - return NULL; - gt->resource.name = name; - gt->stolen = backed; - gt->in_gart = backed; - /* Ensure this is set for non GEM objects */ - gt->gem.dev = dev; - ret = allocate_resource(dev_priv->gtt_mem, >->resource, - len, start, end, align, NULL, NULL); - if (ret == 0) { - gt->offset = gt->resource.start - r->start; - return gt; - } - kfree(gt); - return NULL; -} - -/** - * psb_gtt_free_range - release GTT address space - * @dev: our DRM device - * @gt: a mapping created with psb_gtt_alloc_range - * - * Release a resource that was allocated with psb_gtt_alloc_range. If the - * object has been pinned by mmap users we clean this up here currently. - */ -void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt) -{ - /* Undo the mmap pin if we are destroying the object */ - if (gt->mmapping) { - psb_gtt_unpin(gt); - gt->mmapping = 0; - } - WARN_ON(gt->in_gart && !gt->stolen); - release_resource(>->resource); - kfree(gt); + /* Make sure all the entries are set before we return */ + ioread32(gtt_slot - 1); } static void psb_gtt_alloc(struct drm_device *dev) @@ -498,7 +303,7 @@ int psb_gtt_restore(struct drm_device *dev) { struct drm_psb_private *dev_priv = to_drm_psb_private(dev); struct resource *r = dev_priv->gtt_mem->child; - struct gtt_range *range; + struct psb_gem_object *pobj; unsigned int restored = 0, total = 0, size = 0; /* On resume, the gtt_mutex is already initialized */ @@ -506,10 +311,15 @@ int psb_gtt_restore(struct drm_device *dev) psb_gtt_init(dev, 1); while (r != NULL) { - range = container_of(r, struct gtt_range, resource); - if (range->pages) { - psb_gtt_insert(dev, range, 1); - size += range->resource.end - range->resource.start; + /* + * TODO: GTT restoration needs a refactoring, so that we don't have to touch + * struct psb_gem_object here. The type represents a GEM object and is + * not related to the GTT itself. 
+ */ + pobj = container_of(r, struct psb_gem_object, resource); + if (pobj->pages) { + psb_gtt_insert_pages(dev_priv, &pobj->resource, pobj->pages); + size += pobj->resource.end - pobj->resource.start; restored++; } r = r->sibling; diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h index 2bf165849ebe..ff1dcdd1ff52 100644 --- a/drivers/gpu/drm/gma500/gtt.h +++ b/drivers/gpu/drm/gma500/gtt.h @@ -10,6 +10,8 @@ #include <drm/drm_gem.h> +struct drm_psb_private; + /* This wants cleaning up with respect to the psb_dev and un-needed stuff */ struct psb_gtt { uint32_t gatt_start; @@ -26,27 +28,14 @@ struct psb_gtt { /* Exported functions */ extern int psb_gtt_init(struct drm_device *dev, int resume); extern void psb_gtt_takedown(struct drm_device *dev); +extern int psb_gtt_restore(struct drm_device *dev); -/* Each gtt_range describes an allocation in the GTT area */ -struct gtt_range { - struct resource resource; /* Resource for our allocation */ - u32 offset; /* GTT offset of our object */ - struct drm_gem_object gem; /* GEM high level stuff */ - int in_gart; /* Currently in the GART (ref ct) */ - bool stolen; /* Backed from stolen RAM */ - bool mmapping; /* Is mmappable */ - struct page **pages; /* Backing pages if present */ - int npage; /* Number of backing pages */ -}; +int psb_gtt_allocate_resource(struct drm_psb_private *pdev, struct resource *res, + const char *name, resource_size_t size, resource_size_t align, + bool stolen, u32 *offset); -#define to_gtt_range(x) container_of(x, struct gtt_range, gem) +void psb_gtt_insert_pages(struct drm_psb_private *pdev, const struct resource *res, + struct page **pages); +void psb_gtt_remove_pages(struct drm_psb_private *pdev, const struct resource *res); -extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len, - const char *name, int backed, - u32 align); -extern void psb_gtt_kref_put(struct gtt_range *gt); -extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt); -extern int psb_gtt_pin(struct gtt_range *gt); -extern void psb_gtt_unpin(struct gtt_range *gt); -extern int psb_gtt_restore(struct drm_device *dev); #endif diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index c6b115954b7d..36c7c2686c90 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -10,6 +10,7 @@ #include <drm/drm_fourcc.h> #include "framebuffer.h" +#include "gem.h" #include "gma_display.h" #include "power.h" #include "psb_drv.h" @@ -608,7 +609,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc, if (!gma_power_begin(dev, true)) return 0; - start = to_gtt_range(fb->obj[0])->offset; + start = to_psb_gem_object(fb->obj[0])->offset; offset = y * fb->pitches[0] + x * fb->format->cpp[0]; REG_WRITE(map->stride, fb->pitches[0]); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 7a10bb39ef0b..65cf1c79dd7c 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -19,6 +19,7 @@ #include <acpi/video.h> #include <drm/drm.h> +#include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> @@ -448,6 +449,17 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct drm_device *dev; int ret; + /* + * We cannot yet easily find the framebuffer's location in memory. So + * remove all framebuffers here. + * + * TODO: Refactor psb_driver_load() to map vdc_reg earlier. 
Then we + * might be able to read the framebuffer range from the device. + */ + ret = drm_aperture_remove_framebuffers(true, &driver); + if (ret) + return ret; + ret = pcim_enable_device(pdev); if (ret) return ret; diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index f5f259fde88e..d5f95212934e 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -12,6 +12,7 @@ #include <drm/drm_plane_helper.h> #include "framebuffer.h" +#include "gem.h" #include "gma_display.h" #include "power.h" #include "psb_drv.h" @@ -454,23 +455,21 @@ static void psb_intel_cursor_init(struct drm_device *dev, struct drm_psb_private *dev_priv = to_drm_psb_private(dev); u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR }; u32 base[3] = { CURABASE, CURBBASE, CURCBASE }; - struct gtt_range *cursor_gt; + struct psb_gem_object *cursor_pobj; if (dev_priv->ops->cursor_needs_phys) { /* Allocate 4 pages of stolen mem for a hardware cursor. That * is enough for the 64 x 64 ARGB cursors we support. */ - cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1, - PAGE_SIZE); - if (!cursor_gt) { - gma_crtc->cursor_gt = NULL; + cursor_pobj = psb_gem_create(dev, 4 * PAGE_SIZE, "cursor", true, PAGE_SIZE); + if (IS_ERR(cursor_pobj)) { + gma_crtc->cursor_pobj = NULL; goto out; } - gma_crtc->cursor_gt = cursor_gt; - gma_crtc->cursor_addr = dev_priv->stolen_base + - cursor_gt->offset; + gma_crtc->cursor_pobj = cursor_pobj; + gma_crtc->cursor_addr = dev_priv->stolen_base + cursor_pobj->offset; } else { - gma_crtc->cursor_gt = NULL; + gma_crtc->cursor_pobj = NULL; } out: diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 5340225d6997..db3e757328fe 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -140,7 +140,7 @@ struct gma_crtc { int pipe; int plane; uint32_t cursor_addr; - struct gtt_range *cursor_gt; + struct psb_gem_object *cursor_pobj; u8 lut_adj[256]; struct psb_intel_framebuffer *fbdev_fb; /* a mode_set for fbdev users on this crtc */ diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index daf75c178c2b..a150a5a4b5d4 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -74,7 +74,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format if (!buf) return 0; - drm_fb_xrgb8888_to_gray8(buf, src, fb, rect); + drm_fb_xrgb8888_to_gray8(buf, 0, src, fb, rect); pix8 = buf; for (y = 0; y < height; y++) { @@ -190,23 +190,23 @@ retry: goto end_cpu_access; } } else if (format->format == DRM_FORMAT_R8) { - drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, rect); + drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, rect); } else if (format->format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(buf, vaddr, fb, rect); + drm_fb_xrgb8888_to_rgb332(buf, 0, vaddr, fb, rect); } else if (format->format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian()); + drm_fb_xrgb8888_to_rgb565(buf, 0, vaddr, fb, rect, gud_is_big_endian()); } else if (format->format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(buf, vaddr, fb, rect); + drm_fb_xrgb8888_to_rgb888(buf, 0, vaddr, fb, rect); } else { len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect); } } else if (gud_is_big_endian() && format->cpp[0] > 1) { - drm_fb_swab(buf, vaddr, fb, rect, !import_attach); + drm_fb_swab(buf, 0, vaddr, fb, rect, !import_attach); } else if (compression && 
!import_attach && pitch == fb->pitches[0]) { /* can compress directly from the framebuffer */ buf = vaddr + rect->y1 * pitch; } else { - drm_fb_memcpy(buf, vaddr, fb, rect); + drm_fb_memcpy(buf, 0, vaddr, fb, rect); } memset(req, 0, sizeof(*req)); diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig index 290553e2f6b4..b770f7662830 100644 --- a/drivers/gpu/drm/hisilicon/kirin/Kconfig +++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig @@ -4,7 +4,6 @@ config DRM_HISI_KIRIN depends on DRM && OF && ARM64 select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER select DRM_MIPI_DSI help Choose this option if you have a hisilicon Kirin chipsets(hi6220). diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 952cfdb1961d..1d556482bb46 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -81,7 +81,7 @@ struct dsi_hw_ctx { struct dw_dsi { struct drm_encoder encoder; - struct drm_bridge *bridge; + struct device *dev; struct mipi_dsi_host host; struct drm_display_mode cur_mode; struct dsi_hw_ctx *ctx; @@ -720,10 +720,13 @@ static int dw_drm_encoder_init(struct device *dev, return 0; } +static const struct component_ops dsi_ops; static int dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *mdsi) { struct dw_dsi *dsi = host_to_dsi(host); + struct device *dev = host->dev; + int ret; if (mdsi->lanes < 1 || mdsi->lanes > 4) { DRM_ERROR("dsi device params invalid\n"); @@ -734,13 +737,20 @@ static int dsi_host_attach(struct mipi_dsi_host *host, dsi->format = mdsi->format; dsi->mode_flags = mdsi->mode_flags; + ret = component_add(dev, &dsi_ops); + if (ret) + return ret; + return 0; } static int dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *mdsi) { - /* do nothing */ + struct device *dev = host->dev; + + component_del(dev, &dsi_ops); + return 0; } @@ -768,7 +778,17 @@ static int dsi_host_init(struct device *dev, struct dw_dsi *dsi) static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi) { struct drm_encoder *encoder = &dsi->encoder; - struct drm_bridge *bridge = dsi->bridge; + struct drm_bridge *bridge; + struct device_node *np = dsi->dev->of_node; + int ret; + + /* + * Get the endpoint node. In our case, dsi has one output port1 + * to which the external HDMI bridge is connected. + */ + ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &bridge); + if (ret) + return ret; /* associate the bridge to dsi encoder */ return drm_bridge_attach(encoder, bridge, NULL, 0); @@ -785,10 +805,6 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - ret = dsi_host_init(dev, dsi); - if (ret) - return ret; - ret = dsi_bridge_init(drm_dev, dsi); if (ret) return ret; @@ -809,17 +825,7 @@ static const struct component_ops dsi_ops = { static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi) { struct dsi_hw_ctx *ctx = dsi->ctx; - struct device_node *np = pdev->dev.of_node; struct resource *res; - int ret; - - /* - * Get the endpoint node. In our case, dsi has one output port1 - * to which the external HDMI bridge is connected. 
- */ - ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge); - if (ret) - return ret; ctx->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(ctx->pclk)) { @@ -852,6 +858,7 @@ static int dsi_probe(struct platform_device *pdev) dsi = &data->dsi; ctx = &data->ctx; dsi->ctx = ctx; + dsi->dev = &pdev->dev; ret = dsi_parse_dt(pdev, dsi); if (ret) @@ -859,12 +866,19 @@ static int dsi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); - return component_add(&pdev->dev, &dsi_ops); + ret = dsi_host_init(&pdev->dev, dsi); + if (ret) + return ret; + + return 0; } static int dsi_remove(struct platform_device *pdev) { - component_del(&pdev->dev, &dsi_ops); + struct dsi_data *data = platform_get_drvdata(pdev); + struct dw_dsi *dsi = &data->dsi; + + mipi_dsi_host_unregister(&dsi->host); return 0; } diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h index 886add4f9cd0..d2d8582b36df 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm.h +++ b/drivers/gpu/drm/hyperv/hyperv_drm.h @@ -46,6 +46,7 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv); int hyperv_update_vram_location(struct hv_device *hdev, phys_addr_t vram_pp); int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp, u32 w, u32 h, u32 pitch); +int hyperv_hide_hw_ptr(struct hv_device *hdev); int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect); int hyperv_connect_vsp(struct hv_device *hdev); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index cd818a629183..00e53de4812b 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev) { struct drm_device *dev = hv_get_drvdata(hdev); struct hyperv_drm_device *hv = to_hv(dev); + struct pci_dev *pdev; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); - vmbus_free_mmio(hv->mem->start, hv->fb_size); + + /* + * Free allocated MMIO memory only on Gen2 VMs. 
+ * On Gen1 VMs, release the PCI device + */ + if (efi_enabled(EFI_BOOT)) { + vmbus_free_mmio(hv->mem->start, hv->fb_size); + } else { + pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, + PCI_DEVICE_ID_HYPERV_VIDEO, NULL); + if (!pdev) { + drm_err(dev, "Unable to find PCI Hyper-V video\n"); + return -ENODEV; + } + pci_release_region(pdev, 0); + pci_dev_put(pdev); + } return 0; } diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index 6dd4717d3e1e..93f51e70a951 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -23,13 +23,16 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb, struct drm_rect *rect) { struct hyperv_drm_device *hv = to_hv(fb->dev); + void __iomem *dst = hv->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ int idx; if (!drm_dev_enter(&hv->dev, &idx)) return -ENODEV; - drm_fb_memcpy_dstclip(hv->vram, fb->pitches[0], vmap, fb, rect); + dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect); + drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect); + drm_dev_exit(idx); return 0; @@ -101,6 +104,7 @@ static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe, struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev); struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + hyperv_hide_hw_ptr(hv->hdev); hyperv_update_situation(hv->hdev, 1, hv->screen_depth, crtc_state->mode.hdisplay, crtc_state->mode.vdisplay, diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c index 6d4bdccfbd1a..c0155c6271bf 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c @@ -299,6 +299,55 @@ int hyperv_update_situation(struct hv_device *hdev, u8 active, u32 bpp, return 0; } +/* + * Hyper-V supports a hardware cursor feature. It's not used by Linux VM, + * but the Hyper-V host still draws a point as an extra mouse pointer, + * which is unwanted, especially when Xorg is running. + * + * The hyperv_fb driver uses synthvid_send_ptr() to hide the unwanted + * pointer, by setting msg.ptr_pos.is_visible = 1 and setting the + * msg.ptr_shape.data. Note: setting msg.ptr_pos.is_visible to 0 doesn't + * work in tests. + * + * Copy synthvid_send_ptr() to hyperv_drm and rename it to + * hyperv_hide_hw_ptr(). Note: hyperv_hide_hw_ptr() is also called in the + * handler of the SYNTHVID_FEATURE_CHANGE event, otherwise the host still + * draws an extra unwanted mouse pointer after the VM Connection window is + * closed and reopened. 
+ */ +int hyperv_hide_hw_ptr(struct hv_device *hdev) +{ + struct synthvid_msg msg; + + memset(&msg, 0, sizeof(struct synthvid_msg)); + msg.vid_hdr.type = SYNTHVID_POINTER_POSITION; + msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + + sizeof(struct synthvid_pointer_position); + msg.ptr_pos.is_visible = 1; + msg.ptr_pos.video_output = 0; + msg.ptr_pos.image_x = 0; + msg.ptr_pos.image_y = 0; + hyperv_sendpacket(hdev, &msg); + + memset(&msg, 0, sizeof(struct synthvid_msg)); + msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE; + msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + + sizeof(struct synthvid_pointer_shape); + msg.ptr_shape.part_idx = SYNTHVID_CURSOR_COMPLETE; + msg.ptr_shape.is_argb = 1; + msg.ptr_shape.width = 1; + msg.ptr_shape.height = 1; + msg.ptr_shape.hot_x = 0; + msg.ptr_shape.hot_y = 0; + msg.ptr_shape.data[0] = 0; + msg.ptr_shape.data[1] = 1; + msg.ptr_shape.data[2] = 1; + msg.ptr_shape.data[3] = 1; + hyperv_sendpacket(hdev, &msg); + + return 0; +} + int hyperv_update_dirt(struct hv_device *hdev, struct drm_rect *rect) { struct hyperv_drm_device *hv = hv_get_drvdata(hdev); @@ -392,8 +441,11 @@ static void hyperv_receive_sub(struct hv_device *hdev) return; } - if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) + if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) { hv->dirt_needed = msg->feature_chg.is_dirt_needed; + if (hv->dirt_needed) + hyperv_hide_hw_ptr(hv->hdev); + } } static void hyperv_receive(void *ctx) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index f960f5d7664e..a4c94dc2e216 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -21,7 +21,7 @@ config DRM_I915 select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI select SYNC_FILE - select IOSF_MBI + select IOSF_MBI if X86 select CRC32 select SND_HDA_I915 if SND_HDA_CORE select CEC_CORE if CEC_NOTIFIER @@ -126,11 +126,23 @@ config DRM_I915_GVT_KVMGT depends on DRM_I915_GVT depends on KVM depends on VFIO_MDEV + select KVM_EXTERNAL_WRITE_TRACKING default n help Choose this option if you want to enable KVMGT support for Intel GVT-g. +config DRM_I915_PXP + bool "Enable Intel PXP support" + depends on DRM_I915 + depends on INTEL_MEI && INTEL_MEI_PXP + default n + help + PXP (Protected Xe Path) is an i915 component, available on graphics + version 12 and newer GPUs, that helps to establish the hardware + protected session and manage the status of the alive software session, + as well as its life cycle. + menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 642a5b5a1b81..6ddd2d2bbaaf 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -13,14 +13,11 @@ # will most likely get a sudden build breakage... Hopefully we will fix # new warnings before CI updates! 
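+# Note on the switch below: $(call cc-disable-warning, <W>) probes the
+# compiler with -W<W> and expands to -Wno-<W> only when the flag is known,
+# whereas a bare -Wno-<W> is passed unconditionally. The probe is presumably
+# kept only for warnings that some still-supported compilers may lack.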
subdir-ccflags-y := -Wall -Wextra -subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) -subdir-ccflags-y += $(call cc-disable-warning, type-limits) -subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) +subdir-ccflags-y += -Wno-unused-parameter +subdir-ccflags-y += -Wno-type-limits +subdir-ccflags-y += -Wno-missing-field-initializers +subdir-ccflags-y += -Wno-sign-compare subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) -# clang warnings -subdir-ccflags-y += $(call cc-disable-warning, sign-compare) -subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) -subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror @@ -33,7 +30,7 @@ subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! # core driver code -i915-y += i915_drv.o \ +i915-y += i915_driver.o \ i915_config.o \ i915_irq.o \ i915_getparam.o \ @@ -50,18 +47,19 @@ i915-y += i915_drv.o \ intel_dram.o \ intel_memory_region.o \ intel_pch.o \ + intel_pcode.o \ intel_pm.o \ intel_region_ttm.o \ intel_runtime_pm.o \ - intel_sideband.o \ + intel_sbi.o \ intel_step.o \ intel_uncore.o \ intel_wakeref.o \ + vlv_sideband.o \ vlv_suspend.o # core library code i915-y += \ - dma_resv_utils.o \ i915_memcpy.o \ i915_mm.o \ i915_sw_fence.o \ @@ -79,9 +77,6 @@ i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o # "Graphics Technology" (aka we talk to the gpu) gt-y += \ - gt/debugfs_engines.o \ - gt/debugfs_gt.o \ - gt/debugfs_gt_pm.o \ gt/gen2_engine_cs.o \ gt/gen6_engine_cs.o \ gt/gen6_ppgtt.o \ @@ -101,8 +96,11 @@ gt-y += \ gt/intel_gt.o \ gt/intel_gt_buffer_pool.o \ gt/intel_gt_clock_utils.o \ + gt/intel_gt_debugfs.o \ + gt/intel_gt_engines_debugfs.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \ + gt/intel_gt_pm_debugfs.o \ gt/intel_gt_pm_irq.o \ gt/intel_gt_requests.o \ gt/intel_gtt.o \ @@ -155,6 +153,8 @@ gem-y += \ gem/i915_gem_throttle.o \ gem/i915_gem_tiling.o \ gem/i915_gem_ttm.o \ + gem/i915_gem_ttm_move.o \ + gem/i915_gem_ttm_pm.o \ gem/i915_gem_userptr.o \ gem/i915_gem_wait.o \ gem/i915_gemfs.o @@ -173,6 +173,7 @@ i915-y += \ i915_trace_points.o \ i915_ttm_buddy_manager.o \ i915_vma.o \ + i915_vma_snapshot.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support @@ -212,8 +213,11 @@ i915-y += \ display/intel_dpio_phy.o \ display/intel_dpll.o \ display/intel_dpll_mgr.o \ + display/intel_dpt.o \ + display/intel_drrs.o \ display/intel_dsb.o \ display/intel_fb.o \ + display/intel_fb_pin.o \ display/intel_fbc.o \ display/intel_fdi.o \ display/intel_fifo_underrun.o \ @@ -223,6 +227,9 @@ i915-y += \ display/intel_hotplug.o \ display/intel_lpe_audio.o \ display/intel_overlay.o \ + display/intel_pch_display.o \ + display/intel_pch_refclk.o \ + display/intel_plane_initial.o \ display/intel_psr.o \ display/intel_quirks.o \ display/intel_sprite.o \ @@ -248,9 +255,11 @@ i915-y += \ display/g4x_dp.o \ display/g4x_hdmi.o \ display/icl_dsi.o \ + display/intel_backlight.o \ display/intel_crt.o \ display/intel_ddi.o \ display/intel_ddi_buf_trans.o \ + display/intel_display_trace.o \ display/intel_dp.o \ display/intel_dp_aux.o \ display/intel_dp_aux_backlight.o \ @@ -278,6 +287,16 @@ i915-y += \ i915-y += i915_perf.o +# Protected execution platform (PXP) support +i915-$(CONFIG_DRM_I915_PXP) += \ + pxp/intel_pxp.o \ + pxp/intel_pxp_cmd.o \ + pxp/intel_pxp_debugfs.o \ + pxp/intel_pxp_irq.o \ + pxp/intel_pxp_pm.o \ + pxp/intel_pxp_session.o \ + 
pxp/intel_pxp_tee.o + # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index de0f358184aa..f37677df6ebf 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -7,7 +7,9 @@ #include "g4x_dp.h" #include "intel_audio.h" +#include "intel_backlight.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp.h" @@ -16,9 +18,8 @@ #include "intel_fifo_underrun.h" #include "intel_hdmi.h" #include "intel_hotplug.h" -#include "intel_panel.h" #include "intel_pps.h" -#include "intel_sideband.h" +#include "vlv_sideband.h" struct dp_link_dpll { int clock; @@ -211,7 +212,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp, struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); + assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_disabled(dev_priv); @@ -251,7 +252,7 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); + assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); assert_dp_port_disabled(intel_dp); assert_edp_pll_enabled(dev_priv); @@ -426,7 +427,6 @@ intel_dp_link_down(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; - u32 DP = intel_dp->DP; if (drm_WARN_ON(&dev_priv->drm, (intel_de_read(dev_priv, intel_dp->output_reg) & @@ -437,17 +437,17 @@ intel_dp_link_down(struct intel_encoder *encoder, if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { - DP &= ~DP_LINK_TRAIN_MASK_CPT; - DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; + intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT; + intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; } else { - DP &= ~DP_LINK_TRAIN_MASK; - DP |= DP_LINK_TRAIN_PAT_IDLE; + intel_dp->DP &= ~DP_LINK_TRAIN_MASK; + intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE; } - intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); - DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); - intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); /* @@ -464,14 +464,14 @@ intel_dp_link_down(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); /* always enable with pattern 1 (as per spec) */ - DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); - DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | + intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); + intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | DP_LINK_TRAIN_PAT_1; - intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); - DP &= ~DP_PORT_EN; - intel_de_write(dev_priv, 
intel_dp->output_reg, DP); + intel_dp->DP &= ~DP_PORT_EN; + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(dev_priv, intel_dp->output_reg); intel_wait_for_vblank_if_active(dev_priv, PIPE_A); @@ -481,8 +481,6 @@ intel_dp_link_down(struct intel_encoder *encoder, msleep(intel_dp->pps.panel_power_down_delay); - intel_dp->DP = DP; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { intel_wakeref_t wakeref; @@ -582,19 +580,18 @@ cpt_set_link_train(struct intel_dp *intel_dp, u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 *DP = &intel_dp->DP; - *DP &= ~DP_LINK_TRAIN_MASK_CPT; + intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT; switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: - *DP |= DP_LINK_TRAIN_OFF_CPT; + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; break; case DP_TRAINING_PATTERN_1: - *DP |= DP_LINK_TRAIN_PAT_1_CPT; + intel_dp->DP |= DP_LINK_TRAIN_PAT_1_CPT; break; case DP_TRAINING_PATTERN_2: - *DP |= DP_LINK_TRAIN_PAT_2_CPT; + intel_dp->DP |= DP_LINK_TRAIN_PAT_2_CPT; break; default: MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat)); @@ -611,19 +608,18 @@ g4x_set_link_train(struct intel_dp *intel_dp, u8 dp_train_pat) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 *DP = &intel_dp->DP; - *DP &= ~DP_LINK_TRAIN_MASK; + intel_dp->DP &= ~DP_LINK_TRAIN_MASK; switch (intel_dp_training_pattern_symbol(dp_train_pat)) { case DP_TRAINING_PATTERN_DISABLE: - *DP |= DP_LINK_TRAIN_OFF; + intel_dp->DP |= DP_LINK_TRAIN_OFF; break; case DP_TRAINING_PATTERN_1: - *DP |= DP_LINK_TRAIN_PAT_1; + intel_dp->DP |= DP_LINK_TRAIN_PAT_1; break; case DP_TRAINING_PATTERN_2: - *DP |= DP_LINK_TRAIN_PAT_2; + intel_dp->DP |= DP_LINK_TRAIN_PAT_2; break; default: MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat)); @@ -642,7 +638,7 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, /* enable with pattern 1 (as per spec) */ intel_dp_program_link_training_pattern(intel_dp, crtc_state, - DP_TRAINING_PATTERN_1); + DP_PHY_DPRX, DP_TRAINING_PATTERN_1); /* * Magic for VLV/CHV. 
We _must_ first set up the register @@ -813,10 +809,10 @@ static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp) return DP_TRAIN_PRE_EMPH_LEVEL_3; } -static void vlv_set_signal_levels(struct intel_dp *intel_dp, +static void vlv_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; u8 train_set = intel_dp->train_set[0]; @@ -899,10 +895,10 @@ static void vlv_set_signal_levels(struct intel_dp *intel_dp, uniqtranscale_reg_value, 0); } -static void chv_set_signal_levels(struct intel_dp *intel_dp, +static void chv_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 deemph_reg_value, margin_reg_value; bool uniq_trans_scale = false; u8 train_set = intel_dp->train_set[0]; @@ -1020,10 +1016,11 @@ static u32 g4x_signal_levels(u8 train_set) } static void -g4x_set_signal_levels(struct intel_dp *intel_dp, +g4x_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; @@ -1067,10 +1064,11 @@ static u32 snb_cpu_edp_signal_levels(u8 train_set) } static void -snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, +snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; @@ -1118,10 +1116,11 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set) } static void -ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, +ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u8 train_set = intel_dp->train_set[0]; u32 signal_levels; @@ -1334,7 +1333,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, intel_encoder->get_config = intel_dp_get_config; intel_encoder->sync_state = intel_dp_sync_state; intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check; - intel_encoder->update_pipe = intel_panel_update_backlight; + intel_encoder->update_pipe = intel_backlight_update; intel_encoder->suspend = intel_dp_encoder_suspend; intel_encoder->shutdown = intel_dp_encoder_shutdown; if (IS_CHERRYVIEW(dev_priv)) { @@ -1364,15 +1363,15 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, dig_port->dp.set_link_train = g4x_set_link_train; if (IS_CHERRYVIEW(dev_priv)) - dig_port->dp.set_signal_levels = chv_set_signal_levels; + intel_encoder->set_signal_levels = chv_set_signal_levels; else if (IS_VALLEYVIEW(dev_priv)) - dig_port->dp.set_signal_levels = vlv_set_signal_levels; + intel_encoder->set_signal_levels = vlv_set_signal_levels; else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) - dig_port->dp.set_signal_levels = 
ivb_cpu_edp_set_signal_levels; + intel_encoder->set_signal_levels = ivb_cpu_edp_set_signal_levels; else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A) - dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; + intel_encoder->set_signal_levels = snb_cpu_edp_set_signal_levels; else - dig_port->dp.set_signal_levels = g4x_set_signal_levels; + intel_encoder->set_signal_levels = g4x_set_signal_levels; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) { diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index be352e9f0afc..06e00b1eaa7c 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -8,14 +8,15 @@ #include "g4x_hdmi.h" #include "intel_audio.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" #include "intel_hdmi.h" #include "intel_hotplug.h" -#include "intel_sideband.h" #include "intel_sdvo.h" +#include "vlv_sideband.h" static void intel_hdmi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) @@ -584,6 +585,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, else intel_encoder->enable = g4x_enable_hdmi; } + intel_encoder->shutdown = intel_hdmi_encoder_shutdown; intel_encoder->type = INTEL_OUTPUT_HDMI; intel_encoder->power_domain = intel_port_to_power_domain(port); diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index b1439ba78f67..85950ff67609 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" +#include "intel_fbc.h" #include "intel_sprite.h" #include "i9xx_plane.h" @@ -60,22 +61,11 @@ static const u32 vlv_primary_formats[] = { DRM_FORMAT_XBGR16161616F, }; -static const u64 i9xx_format_modifiers[] = { - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -92,13 +82,8 @@ static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, static bool i965_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -136,6 +121,15 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, return i9xx_plane == PLANE_A; } +static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) + return dev_priv->fbc; + else + return NULL; +} + static bool i9xx_plane_has_windowing(struct intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -272,7 +266,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) u32 alignment = intel_surf_alignment(fb, 0); int cpp = fb->format->cpp[0]; - while ((src_x + src_w) * cpp > 
plane_state->view.color_plane[0].stride) { + while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); @@ -418,38 +412,25 @@ static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, return DIV_ROUND_UP(pixel_rate * num, den); } -static void i9xx_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static void i9xx_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - u32 linear_offset; - int x = plane_state->view.color_plane[0].x; - int y = plane_state->view.color_plane[0].y; - int crtc_x = plane_state->uapi.dst.x1; - int crtc_y = plane_state->uapi.dst.y1; - int crtc_w = drm_rect_width(&plane_state->uapi.dst); - int crtc_h = drm_rect_height(&plane_state->uapi.dst); unsigned long irqflags; - u32 dspaddr_offset; - u32 dspcntr; - - dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - - if (DISPLAY_VER(dev_priv) >= 4) - dspaddr_offset = plane_state->view.color_plane[0].offset; - else - dspaddr_offset = linear_offset; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); if (DISPLAY_VER(dev_priv) < 4) { + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); + /* * PLANE_A doesn't actually have a full window * generator but let's assume we still need to @@ -459,7 +440,39 @@ static void i9xx_update_plane(struct intel_plane *plane, (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), ((crtc_h - 1) << 16) | (crtc_w - 1)); - } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { + } + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void i9xx_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + int x = plane_state->view.color_plane[0].x; + int y = plane_state->view.color_plane[0].y; + u32 dspcntr, dspaddr_offset, linear_offset; + unsigned long irqflags; + + dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + if (DISPLAY_VER(dev_priv) >= 4) + dspaddr_offset = plane_state->view.color_plane[0].offset; + else + dspaddr_offset = linear_offset; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); + intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), @@ -493,8 +506,22 @@ static void i9xx_update_plane(struct intel_plane *plane, 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void i9xx_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +static void i830_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + /* + * On i830/i845 all registers are self-arming [ALM040]. + * + * Additional breakage on i830 causes register reads to return + * the last latched value instead of the last written value [ALM026]. + */ + i9xx_plane_update_noarm(plane, crtc_state, plane_state); + i9xx_plane_update_arm(plane, crtc_state, plane_state); +} + +static void i9xx_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; @@ -768,6 +795,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; + const u64 *modifiers; const u32 *formats; int num_formats; int ret, zpos; @@ -789,12 +817,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->id = PLANE_PRIMARY; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } + intel_fbc_add_plane(i9xx_plane_fbc(dev_priv, plane->i9xx_plane), plane); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { formats = vlv_primary_formats; @@ -851,8 +874,13 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->max_stride = ilk_primary_max_stride; } - plane->update_plane = i9xx_update_plane; - plane->disable_plane = i9xx_disable_plane; + if (IS_I830(dev_priv) || IS_I845G(dev_priv)) { + plane->update_arm = i830_plane_update_arm; + } else { + plane->update_noarm = i9xx_plane_update_noarm; + plane->update_arm = i9xx_plane_update_arm; + } + plane->disable_arm = i9xx_plane_disable_arm; plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; @@ -875,21 +903,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->disable_flip_done = ilk_primary_disable_flip_done; } + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); + if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, - i9xx_format_modifiers, + modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, - i9xx_format_modifiers, + modifiers, DRM_PLANE_TYPE_PRIMARY, "plane %c", plane_name(plane->i9xx_plane)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 43ec7fcd3f5d..5781e9fac8b4 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -28,13 +28,16 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_mipi_dsi.h> +#include "icl_dsi.h" #include "intel_atomic.h" +#include "intel_backlight.h" #include "intel_combo_phy.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_dsi.h" 
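/*
 * [Editor's aside, not part of the commit: the i830/i845 note above
 * explains why i830_plane_update_arm() simply chains both phases;
 * with self-arming registers there is no separate arming write, so
 * the whole update collapses to
 *
 *	i9xx_plane_update_noarm(plane, crtc_state, plane_state);
 *	i9xx_plane_update_arm(plane, crtc_state, plane_state);
 *
 * whereas on other platforms the noarm/arm split lets the bulk of the
 * register writes land before the single write that latches them.]
 */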
+#include "intel_dsi_vbt.h" #include "intel_panel.h" #include "intel_vdsc.h" #include "skl_scaler.h" @@ -54,20 +57,28 @@ static int payload_credits_available(struct drm_i915_private *dev_priv, >> FREE_PLOAD_CREDIT_SHIFT; } -static void wait_for_header_credits(struct drm_i915_private *dev_priv, - enum transcoder dsi_trans) +static bool wait_for_header_credits(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans, int hdr_credit) { if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= - MAX_HEADER_CREDIT, 100)) + hdr_credit, 100)) { drm_err(&dev_priv->drm, "DSI header credits not released\n"); + return false; + } + + return true; } -static void wait_for_payload_credits(struct drm_i915_private *dev_priv, - enum transcoder dsi_trans) +static bool wait_for_payload_credits(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans, int payld_credit) { if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= - MAX_PLOAD_CREDIT, 100)) + payld_credit, 100)) { drm_err(&dev_priv->drm, "DSI payload credits not released\n"); + return false; + } + + return true; } static enum transcoder dsi_port_to_transcoder(enum port port) @@ -90,8 +101,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) /* wait for header/payload credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans); - wait_for_payload_credits(dev_priv, dsi_trans); + wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); + wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT); } /* send nop DCS command */ @@ -108,7 +119,7 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) /* wait for header credits to be released */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - wait_for_header_credits(dev_priv, dsi_trans); + wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT); } /* wait for LP TX in progress bit to be cleared */ @@ -120,54 +131,52 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) } } -static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data, - u32 len) +static int dsi_send_pkt_payld(struct intel_dsi_host *host, + const struct mipi_dsi_packet *packet) { struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); - int free_credits; + const u8 *data = packet->payload; + u32 len = packet->payload_length; int i, j; + /* payload queue can accept *256 bytes*, check limit */ + if (len > MAX_PLOAD_CREDIT * 4) { + drm_err(&i915->drm, "payload size exceeds max queue limit\n"); + return -EINVAL; + } + for (i = 0; i < len; i += 4) { u32 tmp = 0; - free_credits = payload_credits_available(dev_priv, dsi_trans); - if (free_credits < 1) { - drm_err(&dev_priv->drm, - "Payload credit not available\n"); - return false; - } + if (!wait_for_payload_credits(i915, dsi_trans, 1)) + return -EBUSY; for (j = 0; j < min_t(u32, len - i, 4); j++) tmp |= *data++ << 8 * j; - intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp); + intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp); } - return true; + return 0; } static int dsi_send_pkt_hdr(struct intel_dsi_host *host, - struct mipi_dsi_packet pkt, bool enable_lpdt) + const struct mipi_dsi_packet *packet, + bool enable_lpdt) 
{ struct intel_dsi *intel_dsi = host->intel_dsi; struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); u32 tmp; - int free_credits; - /* check if header credit available */ - free_credits = header_credits_available(dev_priv, dsi_trans); - if (free_credits < 1) { - drm_err(&dev_priv->drm, - "send pkt header failed, not enough hdr credits\n"); - return -1; - } + if (!wait_for_header_credits(dev_priv, dsi_trans, 1)) + return -EBUSY; tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans)); - if (pkt.payload) + if (packet->payload) tmp |= PAYLOAD_PRESENT; else tmp &= ~PAYLOAD_PRESENT; @@ -176,39 +185,19 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, if (enable_lpdt) tmp |= LP_DATA_TRANSFER; + else + tmp &= ~LP_DATA_TRANSFER; tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); - tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT); - tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT); - tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT); - tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT); + tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT); + tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT); + tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT); + tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT); intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp); return 0; } -static int dsi_send_pkt_payld(struct intel_dsi_host *host, - struct mipi_dsi_packet pkt) -{ - struct intel_dsi *intel_dsi = host->intel_dsi; - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); - - /* payload queue can accept *256 bytes*, check limit */ - if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) { - drm_err(&i915->drm, "payload size exceeds max queue limit\n"); - return -1; - } - - /* load data into command payload queue */ - if (!add_payld_to_queue(host, pkt.payload, - pkt.payload_length)) { - drm_err(&i915->drm, "adding payload to queue failed\n"); - return -1; - } - - return 0; -} - void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -248,7 +237,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) * Program voltage swing and pre-emphasis level values as per * table in BSPEC under DDI buffer programing */ - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); tmp |= SCALING_MODE_SEL(0x2); tmp |= TAP2_DISABLE | TAP3_DISABLE; @@ -262,7 +251,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) tmp |= RTERM_SELECT(0x6); intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); tmp |= SWING_SEL_UPPER(0x2); @@ -470,7 +459,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); @@ -485,7 +474,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) tmp); tmp = intel_de_read(dev_priv, - 
ICL_PORT_PCS_DW1_LN0(phy)); + ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0x1); intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), @@ -504,7 +493,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* clear common keeper enable bit */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~COMMON_KEEPER_EN; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy)); @@ -525,7 +514,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); @@ -538,7 +527,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* Set training enable to trigger update */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp |= TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); @@ -711,10 +700,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { - if (DISPLAY_VER(dev_priv) >= 12) - val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - else - val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); @@ -1150,8 +1136,6 @@ static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - /* step 4a: power up all lanes of the DDI used by DSI */ gen11_dsi_power_up_lanes(encoder); @@ -1177,8 +1161,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, gen11_dsi_configure_transcoder(encoder, crtc_state); /* Step 4l: Gate DDI clocks */ - if (DISPLAY_VER(dev_priv) == 11) - gen11_dsi_gate_clocks(encoder); + gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) @@ -1247,7 +1230,9 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, /* step5: program and powerup panel */ gen11_dsi_powerup_panel(encoder); - intel_dsc_enable(encoder, pipe_config); + intel_dsc_dsi_pps_write(encoder, pipe_config); + + intel_dsc_enable(pipe_config); /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); @@ -1270,6 +1255,27 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder, IGNORE_KVMR_PIPE_A, enable ? IGNORE_KVMR_PIPE_A : 0); } + +/* + * Wa_16012360555:adl-p + * SW will have to program the "LP to HS Wakeup Guardband" + * to account for the repeaters on the HS Request/Ready + * PPI signaling between the Display engine and the DPHY. 
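+ *
+ * [Editor's aside: a hedged reading of adlp_set_lp_hs_wakeup_gb()
+ * below: on display version 13 only, each DSI port gets
+ *
+ *	intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
+ *		     TGL_DSI_CHKN_LSHS_GB_MASK, TGL_DSI_CHKN_LSHS_GB(4));
+ *
+ * i.e. the guardband field is read-modify-written to 4.]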
+ */ +static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + enum port port; + + if (DISPLAY_VER(i915) == 13) { + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), + TGL_DSI_CHKN_LSHS_GB_MASK, + TGL_DSI_CHKN_LSHS_GB(4)); + } +} + static void gen11_dsi_enable(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -1283,11 +1289,14 @@ static void gen11_dsi_enable(struct intel_atomic_state *state, /* Wa_1409054076:icl,jsl,ehl */ icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true); + /* Wa_16012360555:adl-p */ + adlp_set_lp_hs_wakeup_gb(encoder); + /* step6d: enable dsi transcoder */ gen11_dsi_enable_transcoder(encoder); /* step7: enable backlight */ - intel_panel_enable_backlight(crtc_state, conn_state); + intel_backlight_enable(crtc_state, conn_state); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); intel_crtc_vblank_on(crtc_state); @@ -1440,7 +1449,7 @@ static void gen11_dsi_disable(struct intel_atomic_state *state, /* step1: turn off backlight */ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); - intel_panel_disable_backlight(old_conn_state); + intel_backlight_disable(old_conn_state); /* step2d,e: disable transcoder and wait */ gen11_dsi_disable_transcoder(encoder); @@ -1577,8 +1586,14 @@ static void gen11_dsi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum pipe pipe = intel_crtc->pipe; + struct intel_crtc *intel_crtc; + enum pipe pipe; + + if (!crtc_state) + return; + + intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + pipe = intel_crtc->pipe; /* wa verify 1409054076:icl,jsl,ehl */ if (DISPLAY_VER(dev_priv) == 11 && pipe == PIPE_B && @@ -1614,7 +1629,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, /* FIXME: initialize from VBT */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; - ret = intel_dsc_compute_params(encoder, crtc_state); + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; @@ -1644,16 +1659,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; - const struct drm_display_mode *fixed_mode = - intel_connector->panel.fixed_mode; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - intel_fixed_panel_mode(fixed_mode, adjusted_mode); - ret = intel_pch_panel_fitting(pipe_config, conn_state); + ret = intel_panel_compute_config(intel_connector, adjusted_mode); + if (ret) + return ret; + + ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; @@ -1809,18 +1825,18 @@ static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host, if (msg->flags & MIPI_DSI_MSG_USE_LPM) enable_lpdt = true; - /* send packet header */ - ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt); - if (ret < 0) - return ret; - /* only long packet contains payload */ if (mipi_dsi_packet_format_is_long(msg->type)) { - ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt); + ret = dsi_send_pkt_payld(intel_dsi_host, &dsi_pkt); if (ret < 0) return ret; } + /* send 
packet header */ + ret = dsi_send_pkt_hdr(intel_dsi_host, &dsi_pkt, enable_lpdt); + if (ret < 0) + return ret; + //TODO: add payload receive code if needed ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length; @@ -2008,7 +2024,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->port = port; encoder->get_config = gen11_dsi_get_config; encoder->sync_state = gen11_dsi_sync_state; - encoder->update_pipe = intel_panel_update_backlight; + encoder->update_pipe = intel_backlight_update; encoder->compute_config = gen11_dsi_compute_config; encoder->get_hw_state = gen11_dsi_get_hw_state; encoder->initial_fastset_check = gen11_dsi_initial_fastset_check; @@ -2042,7 +2058,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) } intel_panel_init(&intel_connector->panel, fixed_mode, NULL); - intel_panel_setup_backlight(connector, INVALID_PIPE); + intel_backlight_setup(intel_connector, INVALID_PIPE); if (dev_priv->vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h new file mode 100644 index 000000000000..b4861b56b5b2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/icl_dsi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __ICL_DSI_H__ +#define __ICL_DSI_H__ + +struct drm_i915_private; +struct intel_crtc_state; + +void icl_dsi_init(struct drm_i915_private *i915); +void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); + +#endif /* __ICL_DSI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index 7cfe91fc05f2..e78430001f07 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -186,13 +186,16 @@ void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); acpi_handle dhandle; + union acpi_object *obj; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return; - acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID, - INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL); + obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID, + INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL); + if (obj) + ACPI_FREE(obj); } /* @@ -282,3 +285,49 @@ void intel_acpi_device_id_update(struct drm_i915_private *dev_priv) } drm_connector_list_iter_end(&conn_iter); } + +/* NOTE: The connector order must be final before this is called. */ +void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) +{ + struct drm_connector_list_iter conn_iter; + struct drm_device *drm_dev = &i915->drm; + struct fwnode_handle *fwnode = NULL; + struct drm_connector *connector; + struct acpi_device *adev; + + drm_connector_list_iter_begin(drm_dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + /* Always getting the next, even when the last was not used. */ + fwnode = device_get_next_child_node(drm_dev->dev, fwnode); + if (!fwnode) + break; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + case DRM_MODE_CONNECTOR_DSI: + /* + * Integrated displays have a specific address 0x1f on + * most Intel platforms, but not on all of them. 
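+ *
+ * [Editor's aside: "specific address" is the ACPI _ADR; the lookup
+ * below is
+ *
+ *	adev = acpi_find_child_device(ACPI_COMPANION(drm_dev->dev),
+ *				      0x1f, 0);
+ *
+ * and when no such child exists the code falls through to the generic
+ * next-child-fwnode path shared with the other connector types.]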
+ */ + adev = acpi_find_child_device(ACPI_COMPANION(drm_dev->dev), + 0x1f, 0); + if (adev) { + connector->fwnode = + fwnode_handle_get(acpi_fwnode_handle(adev)); + break; + } + fallthrough; + default: + connector->fwnode = fwnode_handle_get(fwnode); + break; + } + } + drm_connector_list_iter_end(&conn_iter); + /* + * device_get_next_child_node() takes a reference on the fwnode, if + * we stopped iterating because we are out of connectors we need to + * put this, otherwise fwnode is NULL and the put is a no-op. + */ + fwnode_handle_put(fwnode); +} diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h index 9f197401c313..4a760a2baed9 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.h +++ b/drivers/gpu/drm/i915/display/intel_acpi.h @@ -13,6 +13,7 @@ void intel_register_dsm_handler(void); void intel_unregister_dsm_handler(void); void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915); void intel_acpi_device_id_update(struct drm_i915_private *i915); +void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915); #else static inline void intel_register_dsm_handler(void) { return; } static inline void intel_unregister_dsm_handler(void) { return; } @@ -20,6 +21,8 @@ static inline void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; } static inline void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; } +static inline +void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; } #endif /* CONFIG_ACPI */ #endif /* __INTEL_ACPI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index b4e7ac51aa31..a62550711e98 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -139,6 +139,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode || + new_conn_state->base.privacy_screen_sw_state != old_conn_state->base.privacy_screen_sw_state || !drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) crtc_state->mode_changed = true; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 47234d898549..89005628cc3a 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -35,10 +35,14 @@ #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> -#include "i915_trace.h" +#include "gt/intel_rps.h" + #include "intel_atomic_plane.h" #include "intel_cdclk.h" +#include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_fb.h" +#include "intel_fb_pin.h" #include "intel_pm.h" #include "intel_sprite.h" @@ -392,7 +396,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state, const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); const struct intel_plane_state *new_master_plane_state; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = @@ -467,31 +471,72 @@ 
skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } -void intel_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +void intel_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + trace_intel_plane_update_noarm(&plane->base, crtc); + + if (plane->update_noarm) + plane->update_noarm(plane, crtc_state, plane_state); +} + +void intel_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - trace_intel_update_plane(&plane->base, crtc); + trace_intel_plane_update_arm(&plane->base, crtc); if (crtc_state->uapi.async_flip && plane->async_flip) plane->async_flip(plane, crtc_state, plane_state, true); else - plane->update_plane(plane, crtc_state, plane_state); + plane->update_arm(plane, crtc_state, plane_state); } -void intel_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +void intel_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc_state); + trace_intel_plane_disable_arm(&plane->base, crtc); + plane->disable_arm(plane, crtc_state); } -void skl_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +void intel_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + u32 update_mask = new_crtc_state->update_planes; + struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + int i; + + if (new_crtc_state->uapi.async_flip) + return; + + /* + * Since we only write non-arming registers here, + * the order does not matter even for skl+. + */ + for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { + if (crtc->pipe != plane->pipe || + !(update_mask & BIT(plane->id))) + continue; + + /* TODO: for mailbox updates this should be skipped */ + if (new_plane_state->uapi.visible || + new_plane_state->planar_slave) + intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); + } +} + +void skl_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -513,17 +558,20 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); + /* + * TODO: for mailbox updates intel_plane_update_noarm() + * would have to be called here as well. 
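+ *
+ * [Editor's aside: in the non-mailbox flow the non-arming writes have
+ * already been issued by intel_update_planes_on_crtc() above, so the
+ * effective per-plane sequence on skl+ is
+ *
+ *	intel_plane_update_noarm(plane, crtc_state, plane_state);
+ *	...
+ *	intel_plane_update_arm(plane, crtc_state, plane_state);
+ *
+ * with only the _arm step latching the new state.]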
+ */ if (new_plane_state->uapi.visible || - new_plane_state->planar_slave) { - intel_update_plane(plane, new_crtc_state, new_plane_state); - } else { - intel_disable_plane(plane, new_crtc_state); - } + new_plane_state->planar_slave) + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + else + intel_plane_disable_arm(plane, new_crtc_state); } } -void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -537,10 +585,14 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; + /* + * TODO: for mailbox updates intel_plane_update_noarm() + * would have to be called here as well. + */ if (new_plane_state->uapi.visible) - intel_update_plane(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); else - intel_disable_plane(plane, new_crtc_state); + intel_plane_disable_arm(plane, new_crtc_state); } } @@ -601,6 +653,214 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, return 0; } +struct wait_rps_boost { + struct wait_queue_entry wait; + + struct drm_crtc *crtc; + struct i915_request *request; +}; + +static int do_rps_boost(struct wait_queue_entry *_wait, + unsigned mode, int sync, void *key) +{ + struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); + struct i915_request *rq = wait->request; + + /* + * If we missed the vblank, but the request is already running it + * is reasonable to assume that it will complete before the next + * vblank without our intervention, so leave RPS alone. + */ + if (!i915_request_started(rq)) + intel_rps_boost(rq); + i915_request_put(rq); + + drm_crtc_vblank_put(wait->crtc); + + list_del(&wait->wait.entry); + kfree(wait); + return 1; +} + +static void add_rps_boost_after_vblank(struct drm_crtc *crtc, + struct dma_fence *fence) +{ + struct wait_rps_boost *wait; + + if (!dma_fence_is_i915(fence)) + return; + + if (DISPLAY_VER(to_i915(crtc->dev)) < 6) + return; + + if (drm_crtc_vblank_get(crtc)) + return; + + wait = kmalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) { + drm_crtc_vblank_put(crtc); + return; + } + + wait->request = to_request(dma_fence_get(fence)); + wait->crtc = crtc; + + wait->wait.func = do_rps_boost; + wait->wait.flags = 0; + + add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); +} + +/** + * intel_prepare_plane_fb - Prepare fb for usage on plane + * @_plane: drm plane to prepare for + * @_new_plane_state: the plane state being prepared + * + * Prepares a framebuffer for usage on a display plane. Generally this + * involves pinning the underlying object and updating the frontbuffer tracking + * bits. Some older platforms need special physical address handling for + * cursor planes. + * + * Returns 0 on success, negative error code on failure. 
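+ *
+ * [Editor's aside: this pair is wired into the atomic helpers at the
+ * bottom of this file,
+ *
+ *	static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
+ *		.prepare_fb = intel_prepare_plane_fb,
+ *		.cleanup_fb = intel_cleanup_plane_fb,
+ *	};
+ *
+ * so the DRM atomic helpers invoke it for every plane in a commit.]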
+ */ +static int +intel_prepare_plane_fb(struct drm_plane *_plane, + struct drm_plane_state *_new_plane_state) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY }; + struct intel_plane *plane = to_intel_plane(_plane); + struct intel_plane_state *new_plane_state = + to_intel_plane_state(_new_plane_state); + struct intel_atomic_state *state = + to_intel_atomic_state(new_plane_state->uapi.state); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct intel_plane_state *old_plane_state = + intel_atomic_get_old_plane_state(state, plane); + struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); + struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); + int ret; + + if (old_obj) { + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, + to_intel_crtc(old_plane_state->hw.crtc)); + + /* Big Hammer, we also need to ensure that any pending + * MI_WAIT_FOR_EVENT inside a user batch buffer on the + * current scanout is retired before unpinning the old + * framebuffer. Note that we rely on userspace rendering + * into the buffer attached to the pipe they are waiting + * on. If not, userspace generates a GPU hang with IPEHR + * point to the MI_WAIT_FOR_EVENT. + * + * This should only fail upon a hung GPU, in which case we + * can safely continue. + */ + if (intel_crtc_needs_modeset(crtc_state)) { + ret = i915_sw_fence_await_reservation(&state->commit_ready, + old_obj->base.resv, NULL, + false, 0, + GFP_KERNEL); + if (ret < 0) + return ret; + } + } + + if (new_plane_state->uapi.fence) { /* explicit fencing */ + i915_gem_fence_wait_priority(new_plane_state->uapi.fence, + &attr); + ret = i915_sw_fence_await_dma_fence(&state->commit_ready, + new_plane_state->uapi.fence, + i915_fence_timeout(dev_priv), + GFP_KERNEL); + if (ret < 0) + return ret; + } + + if (!obj) + return 0; + + + ret = intel_plane_pin_fb(new_plane_state); + if (ret) + return ret; + + i915_gem_object_wait_priority(obj, 0, &attr); + + if (!new_plane_state->uapi.fence) { /* implicit fencing */ + struct dma_resv_iter cursor; + struct dma_fence *fence; + + ret = i915_sw_fence_await_reservation(&state->commit_ready, + obj->base.resv, NULL, + false, + i915_fence_timeout(dev_priv), + GFP_KERNEL); + if (ret < 0) + goto unpin_fb; + + dma_resv_iter_begin(&cursor, obj->base.resv, false); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + add_rps_boost_after_vblank(new_plane_state->hw.crtc, + fence); + } + dma_resv_iter_end(&cursor); + } else { + add_rps_boost_after_vblank(new_plane_state->hw.crtc, + new_plane_state->uapi.fence); + } + + /* + * We declare pageflips to be interactive and so merit a small bias + * towards upclocking to deliver the frame on time. By only changing + * the RPS thresholds to sample more regularly and aim for higher + * clocks we can hopefully deliver low power workloads (like kodi) + * that are not quite steady state without resorting to forcing + * maximum clocks following a vblank miss (see do_rps_boost()). + */ + if (!state->rps_interactive) { + intel_rps_mark_interactive(&dev_priv->gt.rps, true); + state->rps_interactive = true; + } + + return 0; + +unpin_fb: + intel_plane_unpin_fb(new_plane_state); + + return ret; +} + +/** + * intel_cleanup_plane_fb - Cleans up an fb after plane use + * @plane: drm plane to clean up for + * @_old_plane_state: the state from the previous modeset + * + * Cleans up a framebuffer that has just been removed from a plane. 
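+ *
+ * [Editor's aside: the inverse of intel_prepare_plane_fb() above: it
+ * drops the RPS interactive bias taken during prepare and unpins the
+ * framebuffer,
+ *
+ *	intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+ *	intel_plane_unpin_fb(old_plane_state);
+ * ]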
+ */ +static void +intel_cleanup_plane_fb(struct drm_plane *plane, + struct drm_plane_state *_old_plane_state) +{ + struct intel_plane_state *old_plane_state = + to_intel_plane_state(_old_plane_state); + struct intel_atomic_state *state = + to_intel_atomic_state(old_plane_state->uapi.state); + struct drm_i915_private *dev_priv = to_i915(plane->dev); + struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb); + + if (!obj) + return; + + if (state->rps_interactive) { + intel_rps_mark_interactive(&dev_priv->gt.rps, false); + state->rps_interactive = false; + } + + /* Should only be called after a successful intel_prepare_plane_fb()! */ + intel_plane_unpin_fb(old_plane_state); +} + static const struct drm_plane_helper_funcs intel_plane_helper_funcs = { .prepare_fb = intel_prepare_plane_fb, .cleanup_fb = intel_cleanup_plane_fb, diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 62e5a2a77fd4..7907f601598e 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -30,20 +30,25 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, struct intel_crtc *crtc); void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state); -void intel_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -void intel_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); +void intel_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); -void skl_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); -void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); +void intel_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void skl_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 532237588511..3bdca0fe2cee 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -31,6 +31,7 @@ #include "intel_atomic.h" #include "intel_audio.h" #include "intel_cdclk.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_lpe_audio.h" @@ -62,6 +63,15 @@ * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver. 
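 *
 * [Editor's aside: this patch also moves the per-platform codec hooks
 * behind a vtable; call sites now dispatch as
 *
 *	if (dev_priv->audio.funcs)
 *		dev_priv->audio.funcs->audio_codec_enable(encoder,
 *							  crtc_state,
 *							  conn_state);
 *
 * with the g4x/ilk/hsw variants selected in intel_audio_hooks_init().]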
*/ +struct intel_audio_funcs { + void (*audio_codec_enable)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + void (*audio_codec_disable)(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state); +}; + /* DP N/M table */ #define LC_810M 810000 #define LC_540M 540000 @@ -388,7 +398,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; const struct dp_aud_n_m *nm; @@ -436,7 +446,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; int n, rate; @@ -494,7 +504,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n", transcoder_name(cpu_transcoder)); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* Disable timestamps */ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); @@ -512,7 +522,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder); intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); } static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, @@ -641,7 +651,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, "Enable audio codec on transcoder %s, %u bytes ELD\n", transcoder_name(cpu_transcoder), drm_eld_size(eld)); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* Enable Audio WA for 4k DSC usecases */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) @@ -679,7 +689,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, /* Enable timestamps */ hsw_audio_config_update(encoder, crtc_state); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); } static void ilk_audio_codec_disable(struct intel_encoder *encoder, @@ -826,7 +836,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_connector *connector = conn_state->connector; const struct drm_display_mode *adjusted_mode = @@ -848,17 +858,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; - if (dev_priv->display.audio_codec_enable) - dev_priv->display.audio_codec_enable(encoder, - crtc_state, - conn_state); + if (dev_priv->audio.funcs) + dev_priv->audio.funcs->audio_codec_enable(encoder, + crtc_state, + conn_state); - mutex_lock(&dev_priv->av_mutex); + 
mutex_lock(&dev_priv->audio.mutex); encoder->audio_connector = connector; /* referred in audio callbacks */ - dev_priv->av_enc_map[pipe] = encoder; - mutex_unlock(&dev_priv->av_mutex); + dev_priv->audio.encoder_map[pipe] = encoder; + mutex_unlock(&dev_priv->audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -888,20 +898,20 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; enum pipe pipe = crtc->pipe; - if (dev_priv->display.audio_codec_disable) - dev_priv->display.audio_codec_disable(encoder, - old_crtc_state, - old_conn_state); + if (dev_priv->audio.funcs) + dev_priv->audio.funcs->audio_codec_disable(encoder, + old_crtc_state, + old_conn_state); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); encoder->audio_connector = NULL; - dev_priv->av_enc_map[pipe] = NULL; - mutex_unlock(&dev_priv->av_mutex); + dev_priv->audio.encoder_map[pipe] = NULL; + mutex_unlock(&dev_priv->audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -915,24 +925,69 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false); } +static const struct intel_audio_funcs g4x_audio_funcs = { + .audio_codec_enable = g4x_audio_codec_enable, + .audio_codec_disable = g4x_audio_codec_disable, +}; + +static const struct intel_audio_funcs ilk_audio_funcs = { + .audio_codec_enable = ilk_audio_codec_enable, + .audio_codec_disable = ilk_audio_codec_disable, +}; + +static const struct intel_audio_funcs hsw_audio_funcs = { + .audio_codec_enable = hsw_audio_codec_enable, + .audio_codec_disable = hsw_audio_codec_disable, +}; + /** - * intel_init_audio_hooks - Set up chip specific audio hooks + * intel_audio_hooks_init - Set up chip specific audio hooks * @dev_priv: device private */ -void intel_init_audio_hooks(struct drm_i915_private *dev_priv) +void intel_audio_hooks_init(struct drm_i915_private *dev_priv) { if (IS_G4X(dev_priv)) { - dev_priv->display.audio_codec_enable = g4x_audio_codec_enable; - dev_priv->display.audio_codec_disable = g4x_audio_codec_disable; + dev_priv->audio.funcs = &g4x_audio_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; - dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; + dev_priv->audio.funcs = &ilk_audio_funcs; } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) { - dev_priv->display.audio_codec_enable = hsw_audio_codec_enable; - dev_priv->display.audio_codec_disable = hsw_audio_codec_disable; + dev_priv->audio.funcs = &hsw_audio_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; - dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; + dev_priv->audio.funcs = &ilk_audio_funcs; + } +} + +struct aud_ts_cdclk_m_n { + u8 m; + u16 n; +}; + +void intel_audio_cdclk_change_pre(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 13) + intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0); +} + +static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts) +{ + if 
(refclk == 24000) + aud_ts->m = 12; + else + aud_ts->m = 15; + + aud_ts->n = cdclk * aud_ts->m / 24000; +} + +void intel_audio_cdclk_change_post(struct drm_i915_private *i915) +{ + struct aud_ts_cdclk_m_n aud_ts; + + if (DISPLAY_VER(i915) >= 13) { + get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts); + + intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n); + intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN); + drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n); } } @@ -965,7 +1020,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv, struct intel_crtc *crtc; int ret; - crtc = intel_get_first_crtc(dev_priv); + crtc = intel_first_crtc(dev_priv); if (!crtc) return; @@ -1003,13 +1058,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK); - if (dev_priv->audio_power_refcount++ == 0) { + if (dev_priv->audio.power_refcount++ == 0) { if (DISPLAY_VER(dev_priv) >= 9) { intel_de_write(dev_priv, AUD_FREQ_CNTRL, - dev_priv->audio_freq_cntrl); + dev_priv->audio.freq_cntrl); drm_dbg_kms(&dev_priv->drm, "restored AUD_FREQ_CNTRL to 0x%x\n", - dev_priv->audio_freq_cntrl); + dev_priv->audio.freq_cntrl); } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ @@ -1030,7 +1085,7 @@ static void i915_audio_component_put_power(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ - if (--dev_priv->audio_power_refcount == 0) + if (--dev_priv->audio.power_refcount == 0) if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); @@ -1082,7 +1137,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) /* * get the intel_encoder according to the parameter port and pipe * intel_encoder is saved by the index of pipe - * MST & (pipe >= 0): return the av_enc_map[pipe], + * MST & (pipe >= 0): return the audio.encoder_map[pipe], * when port is matched * MST & (pipe < 0): this is invalid * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) @@ -1097,10 +1152,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, /* MST */ if (pipe >= 0) { if (drm_WARN_ON(&dev_priv->drm, - pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) + pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map))) return NULL; - encoder = dev_priv->av_enc_map[pipe]; + encoder = dev_priv->audio.encoder_map[pipe]; /* * when bootup, audio driver may not know it is * MST or not. So it will poll all the port & pipe @@ -1116,7 +1171,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, return NULL; for_each_pipe(dev_priv, pipe) { - encoder = dev_priv->av_enc_map[pipe]; + encoder = dev_priv->audio.encoder_map[pipe]; if (encoder == NULL) continue; @@ -1134,7 +1189,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, int pipe, int rate) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_encoder *encoder; struct intel_crtc *crtc; unsigned long cookie; @@ -1144,7 +1199,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, return 0; cookie = i915_audio_component_get_power(kdev); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* 1. 
get the pipe */ encoder = get_saved_enc(dev_priv, port, pipe); @@ -1163,7 +1218,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, hsw_audio_config_update(encoder, crtc->config); unlock: - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); i915_audio_component_put_power(kdev, cookie); return err; } @@ -1177,13 +1232,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, const u8 *eld; int ret = -EINVAL; - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); intel_encoder = get_saved_enc(dev_priv, port, pipe); if (!intel_encoder) { drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", port_name(port)); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); return ret; } @@ -1195,7 +1250,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, memcpy(buf, eld, min(max_bytes, ret)); } - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); return ret; } @@ -1230,7 +1285,7 @@ static int i915_audio_component_bind(struct device *i915_kdev, BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; - dev_priv->audio_component = acomp; + dev_priv->audio.component = acomp; drm_modeset_unlock_all(&dev_priv->drm); return 0; @@ -1245,14 +1300,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev, drm_modeset_lock_all(&dev_priv->drm); acomp->base.ops = NULL; acomp->base.dev = NULL; - dev_priv->audio_component = NULL; + dev_priv->audio.component = NULL; drm_modeset_unlock_all(&dev_priv->drm); device_link_remove(hda_kdev, i915_kdev); - if (dev_priv->audio_power_refcount) + if (dev_priv->audio.power_refcount) drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n", - dev_priv->audio_power_refcount); + dev_priv->audio.power_refcount); } static const struct component_ops i915_audio_component_bind_ops = { @@ -1308,17 +1363,21 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) else aud_freq = aud_freq_init; - /* use BIOS provided value for TGL unless it is a known bad value */ - if (IS_TIGERLAKE(dev_priv) && aud_freq_init != AUD_FREQ_TGL_BROKEN) + /* use BIOS provided value for TGL and RKL unless it is a known bad value */ + if ((IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv)) && + aud_freq_init != AUD_FREQ_TGL_BROKEN) aud_freq = aud_freq_init; drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n", aud_freq, aud_freq_init); - dev_priv->audio_freq_cntrl = aud_freq; + dev_priv->audio.freq_cntrl = aud_freq; } - dev_priv->audio_component_registered = true; + /* init with current cdclk */ + intel_audio_cdclk_change_post(dev_priv); + + dev_priv->audio.component_registered = true; } /** @@ -1330,11 +1389,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) */ static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) { - if (!dev_priv->audio_component_registered) + if (!dev_priv->audio.component_registered) return; component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); - dev_priv->audio_component_registered = false; + dev_priv->audio.component_registered = false; } /** @@ -1356,7 +1415,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv) */ void intel_audio_deinit(struct drm_i915_private *dev_priv) { - if ((dev_priv)->lpe_audio.platdev != NULL) + if ((dev_priv)->audio.lpe.platdev != NULL) intel_lpe_audio_teardown(dev_priv); else 
i915_audio_component_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h index a3657c7a7ba2..63b22131dc45 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.h +++ b/drivers/gpu/drm/i915/display/intel_audio.h @@ -11,13 +11,15 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_encoder; -void intel_init_audio_hooks(struct drm_i915_private *dev_priv); +void intel_audio_hooks_init(struct drm_i915_private *dev_priv); void intel_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_audio_codec_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state); +void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv); +void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv); void intel_audio_init(struct drm_i915_private *dev_priv); void intel_audio_deinit(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c new file mode 100644 index 000000000000..9523411cddd8 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -0,0 +1,1776 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <linux/kernel.h> +#include <linux/pwm.h> + +#include "intel_backlight.h" +#include "intel_connector.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_dp_aux_backlight.h" +#include "intel_dsi_dcs_backlight.h" +#include "intel_panel.h" + +/** + * scale - scale values from one range to another + * @source_val: value in range [@source_min..@source_max] + * @source_min: minimum legal value for @source_val + * @source_max: maximum legal value for @source_val + * @target_min: corresponding target value for @source_min + * @target_max: corresponding target value for @source_max + * + * Return @source_val in range [@source_min..@source_max] scaled to range + * [@target_min..@target_max]. + */ +static u32 scale(u32 source_val, + u32 source_min, u32 source_max, + u32 target_min, u32 target_max) +{ + u64 target_val; + + WARN_ON(source_min > source_max); + WARN_ON(target_min > target_max); + + /* defensive */ + source_val = clamp(source_val, source_min, source_max); + + /* avoid overflows */ + target_val = mul_u32_u32(source_val - source_min, + target_max - target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +/* + * Scale user_level in range [0..user_max] to [0..hw_max], clamping the result + * to [hw_min..hw_max]. + */ +static u32 clamp_user_to_hw(struct intel_connector *connector, + u32 user_level, u32 user_max) +{ + struct intel_panel *panel = &connector->panel; + u32 hw_level; + + hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max); + hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max); + + return hw_level; +} + +/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. 
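+ *
+ * [Editor's worked example, illustrative numbers only: with
+ * hw_min = 100, hw_max = 200 and user_max = 255, hw_level = 150 yields
+ *
+ *	scale(150, 100, 200, 0, 255)
+ *		= DIV_ROUND_CLOSEST_ULL((150 - 100) * (255 - 0), 200 - 100)
+ *		= DIV_ROUND_CLOSEST_ULL(12750, 100)
+ *		= 128
+ *
+ * so the midpoint of the hw range maps to the midpoint of the user
+ * range, up to rounding.]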
*/ +static u32 scale_hw_to_user(struct intel_connector *connector, + u32 hw_level, u32 user_max) +{ + struct intel_panel *panel = &connector->panel; + + return scale(hw_level, panel->backlight.min, panel->backlight.max, + 0, user_max); +} + +u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0); + + if (dev_priv->params.invert_brightness < 0) + return val; + + if (dev_priv->params.invert_brightness > 0 || + dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { + return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; + } + + return val; +} + +void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, u32 val) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val); + panel->backlight.pwm_funcs->set(conn_state, val); +} + +u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + drm_WARN_ON_ONCE(&dev_priv->drm, + panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); + + val = scale(val, panel->backlight.min, panel->backlight.max, + panel->backlight.pwm_level_min, panel->backlight.pwm_level_max); + + return intel_backlight_invert_pwm_level(connector, val); +} + +u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + drm_WARN_ON_ONCE(&dev_priv->drm, + panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); + + if (dev_priv->params.invert_brightness > 0 || + (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)) + val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); + + return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max, + panel->backlight.min, panel->backlight.max); +} + +static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + + return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; +} + +static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + + return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; +} + +static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 val; + + val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + if (DISPLAY_VER(dev_priv) < 4) + val >>= 1; + + if (panel->backlight.combination_mode) { + u8 lbpc; + + pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc); + val *= lbpc; + } + + return val; +} + +static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + + if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != 
PIPE_B)) + return 0; + + return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; +} + +static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + return intel_de_read(dev_priv, + BXT_BLC_PWM_DUTY(panel->backlight.controller)); +} + +static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct intel_panel *panel = &connector->panel; + struct pwm_state state; + + pwm_get_state(panel->backlight.pwm, &state); + return pwm_get_relative_duty_cycle(&state, 100); +} + +static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + + u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; + intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level); +} + +static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + u32 tmp; + + tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; + intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level); +} + +static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 tmp, mask; + + drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0); + + if (panel->backlight.combination_mode) { + u8 lbpc; + + lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1; + level /= lbpc; + pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc); + } + + if (DISPLAY_VER(dev_priv) == 4) { + mask = BACKLIGHT_DUTY_CYCLE_MASK; + } else { + level <<= 1; + mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV; + } + + tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask; + intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level); +} + +static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; + u32 tmp; + + tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; + intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level); +} + +static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + intel_de_write(dev_priv, + BXT_BLC_PWM_DUTY(panel->backlight.controller), level); +} + +static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; + + pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); +} + +static void +intel_panel_actually_set_backlight(const struct 
drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level); + + panel->backlight.funcs->set(conn_state, level); +} + +/* set backlight brightness to level in range [0..max], assuming hw min is + * respected. + */ +void intel_backlight_set_acpi(const struct drm_connector_state *conn_state, + u32 user_level, u32 user_max) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 hw_level; + + /* + * Lack of crtc may occur during driver init because + * connection_mutex isn't held across the entire backlight + * setup + modeset readout, and the BIOS can issue the + * requests at any time. + */ + if (!panel->backlight.present || !conn_state->crtc) + return; + + mutex_lock(&dev_priv->backlight_lock); + + drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); + + hw_level = clamp_user_to_hw(connector, user_level, user_max); + panel->backlight.level = hw_level; + + if (panel->backlight.device) + panel->backlight.device->props.brightness = + scale_hw_to_user(connector, + panel->backlight.level, + panel->backlight.device->props.max_brightness); + + if (panel->backlight.enabled) + intel_panel_actually_set_backlight(conn_state, hw_level); + + mutex_unlock(&dev_priv->backlight_lock); +} + +static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + u32 tmp; + + intel_backlight_set_pwm_level(old_conn_state, level); + + /* + * Although we don't support or enable CPU PWM with LPT/SPT based + * systems, it may have been enabled prior to loading the + * driver. Disable to avoid warnings on LCPLL disable. + * + * This needs rework if we need to add support for CPU PWM on PCH split + * platforms. 
+ */ + tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); + if (tmp & BLM_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, + "cpu backlight was enabled, disabling\n"); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, + tmp & ~BLM_PWM_ENABLE); + } + + tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); +} + +static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) +{ + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + u32 tmp; + + intel_backlight_set_pwm_level(old_conn_state, val); + + tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); + + tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); +} + +static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) +{ + intel_backlight_set_pwm_level(old_conn_state, val); +} + +static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) +{ + struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev); + u32 tmp; + + intel_backlight_set_pwm_level(old_conn_state, val); + + tmp = intel_de_read(dev_priv, BLC_PWM_CTL2); + intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); +} + +static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) +{ + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; + u32 tmp; + + intel_backlight_set_pwm_level(old_conn_state, val); + + tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), + tmp & ~BLM_PWM_ENABLE); +} + +static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) +{ + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 tmp; + + intel_backlight_set_pwm_level(old_conn_state, val); + + tmp = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + tmp & ~BXT_BLC_PWM_ENABLE); + + if (panel->backlight.controller == 1) { + val = intel_de_read(dev_priv, UTIL_PIN_CTL); + val &= ~UTIL_PIN_ENABLE; + intel_de_write(dev_priv, UTIL_PIN_CTL, val); + } +} + +static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) +{ + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 tmp; + + intel_backlight_set_pwm_level(old_conn_state, val); + + tmp = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + tmp & ~BXT_BLC_PWM_ENABLE); +} + +static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct intel_panel *panel = &connector->panel; + + panel->backlight.pwm_state.enabled = false; + 
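+ /* Commit the updated state so the external PWM controller is actually stopped. */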
pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); +} + +void intel_backlight_disable(const struct drm_connector_state *old_conn_state) +{ + struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + if (!panel->backlight.present) + return; + + /* + * Do not disable backlight on the vga_switcheroo path. When switching + * away from i915, the other client may depend on i915 to handle the + * backlight. This will leave the backlight on unnecessarily when + * another client is not activated. + */ + if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { + drm_dbg_kms(&dev_priv->drm, + "Skipping backlight disable on vga switch\n"); + return; + } + + mutex_lock(&dev_priv->backlight_lock); + + if (panel->backlight.device) + panel->backlight.device->props.power = FB_BLANK_POWERDOWN; + panel->backlight.enabled = false; + panel->backlight.funcs->disable(old_conn_state, 0); + + mutex_unlock(&dev_priv->backlight_lock); +} + +static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 pch_ctl1, pch_ctl2, schicken; + + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n"); + pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); + } + + if (HAS_PCH_LPT(dev_priv)) { + schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2); + if (panel->backlight.alternate_pwm_increment) + schicken |= LPT_PWM_GRANULARITY; + else + schicken &= ~LPT_PWM_GRANULARITY; + intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken); + } else { + schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1); + if (panel->backlight.alternate_pwm_increment) + schicken |= SPT_PWM_GRANULARITY; + else + schicken &= ~SPT_PWM_GRANULARITY; + intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken); + } + + pch_ctl2 = panel->backlight.pwm_level_max << 16; + intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); + + pch_ctl1 = 0; + if (panel->backlight.active_low_pwm) + pch_ctl1 |= BLM_PCH_POLARITY; + + /* After LPT, override is the default. */ + if (HAS_PCH_LPT(dev_priv)) + pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; + + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); + intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, + pch_ctl1 | BLM_PCH_PWM_ENABLE); + + /* This won't stick until the above enable. 
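+ * i.e. the duty cycle has to be programmed after BLM_PCH_PWM_ENABLE is + * set for the write to take effect.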
*/ + intel_backlight_set_pwm_level(conn_state, level); +} + +static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 cpu_ctl2, pch_ctl1, pch_ctl2; + + cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); + if (cpu_ctl2 & BLM_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n"); + cpu_ctl2 &= ~BLM_PWM_ENABLE; + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2); + } + + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n"); + pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); + } + + if (cpu_transcoder == TRANSCODER_EDP) + cpu_ctl2 = BLM_TRANSCODER_EDP; + else + cpu_ctl2 = BLM_PIPE(cpu_transcoder); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2); + intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); + + /* This won't stick until the above enable. */ + intel_backlight_set_pwm_level(conn_state, level); + + pch_ctl2 = panel->backlight.pwm_level_max << 16; + intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); + + pch_ctl1 = 0; + if (panel->backlight.active_low_pwm) + pch_ctl1 |= BLM_PCH_POLARITY; + + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); + intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, + pch_ctl1 | BLM_PCH_PWM_ENABLE); +} + +static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 ctl, freq; + + ctl = intel_de_read(dev_priv, BLC_PWM_CTL); + if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); + intel_de_write(dev_priv, BLC_PWM_CTL, 0); + } + + freq = panel->backlight.pwm_level_max; + if (panel->backlight.combination_mode) + freq /= 0xff; + + ctl = freq << 17; + if (panel->backlight.combination_mode) + ctl |= BLM_LEGACY_MODE; + if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm) + ctl |= BLM_POLARITY_PNV; + + intel_de_write(dev_priv, BLC_PWM_CTL, ctl); + intel_de_posting_read(dev_priv, BLC_PWM_CTL); + + /* XXX: combine this into above write? */ + intel_backlight_set_pwm_level(conn_state, level); + + /* + * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is + * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 + * that has backlight. 
+ */ + if (DISPLAY_VER(dev_priv) == 2) + intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); +} + +static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; + u32 ctl, ctl2, freq; + + ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2); + if (ctl2 & BLM_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); + ctl2 &= ~BLM_PWM_ENABLE; + intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2); + } + + freq = panel->backlight.pwm_level_max; + if (panel->backlight.combination_mode) + freq /= 0xff; + + ctl = freq << 16; + intel_de_write(dev_priv, BLC_PWM_CTL, ctl); + + ctl2 = BLM_PIPE(pipe); + if (panel->backlight.combination_mode) + ctl2 |= BLM_COMBINATION_MODE; + if (panel->backlight.active_low_pwm) + ctl2 |= BLM_POLARITY_I965; + intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2); + intel_de_posting_read(dev_priv, BLC_PWM_CTL2); + intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); + + intel_backlight_set_pwm_level(conn_state, level); +} + +static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; + u32 ctl, ctl2; + + ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); + if (ctl2 & BLM_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); + ctl2 &= ~BLM_PWM_ENABLE; + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2); + } + + ctl = panel->backlight.pwm_level_max << 16; + intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl); + + /* XXX: combine this into above write? */ + intel_backlight_set_pwm_level(conn_state, level); + + ctl2 = 0; + if (panel->backlight.active_low_pwm) + ctl2 |= BLM_POLARITY_I965; + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2); + intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), + ctl2 | BLM_PWM_ENABLE); +} + +static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; + u32 pwm_ctl, val; + + /* Controller 1 uses the utility pin. 
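+ * The pin has to be muxed to PWM mode and routed to the selected pipe + * before the PWM controller itself is enabled below.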
*/ + if (panel->backlight.controller == 1) { + val = intel_de_read(dev_priv, UTIL_PIN_CTL); + if (val & UTIL_PIN_ENABLE) { + drm_dbg_kms(&dev_priv->drm, + "util pin already enabled\n"); + val &= ~UTIL_PIN_ENABLE; + intel_de_write(dev_priv, UTIL_PIN_CTL, val); + } + + val = 0; + if (panel->backlight.util_pin_active_low) + val |= UTIL_PIN_POLARITY; + intel_de_write(dev_priv, UTIL_PIN_CTL, + val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE); + } + + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + if (pwm_ctl & BXT_BLC_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); + pwm_ctl &= ~BXT_BLC_PWM_ENABLE; + intel_de_write(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); + } + + intel_de_write(dev_priv, + BXT_BLC_PWM_FREQ(panel->backlight.controller), + panel->backlight.pwm_level_max); + + intel_backlight_set_pwm_level(conn_state, level); + + pwm_ctl = 0; + if (panel->backlight.active_low_pwm) + pwm_ctl |= BXT_BLC_PWM_POLARITY; + + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); + intel_de_posting_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl | BXT_BLC_PWM_ENABLE); +} + +static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 pwm_ctl; + + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + if (pwm_ctl & BXT_BLC_PWM_ENABLE) { + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); + pwm_ctl &= ~BXT_BLC_PWM_ENABLE; + intel_de_write(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); + } + + intel_de_write(dev_priv, + BXT_BLC_PWM_FREQ(panel->backlight.controller), + panel->backlight.pwm_level_max); + + intel_backlight_set_pwm_level(conn_state, level); + + pwm_ctl = 0; + if (panel->backlight.active_low_pwm) + pwm_ctl |= BXT_BLC_PWM_POLARITY; + + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); + intel_de_posting_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl | BXT_BLC_PWM_ENABLE); +} + +static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_panel *panel = &connector->panel; + + pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); + panel->backlight.pwm_state.enabled = true; + pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); +} + +static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_panel *panel = &connector->panel; + + WARN_ON(panel->backlight.max == 0); + + if (panel->backlight.level <= panel->backlight.min) { + panel->backlight.level = panel->backlight.max; + if (panel->backlight.device) + panel->backlight.device->props.brightness = + scale_hw_to_user(connector, + panel->backlight.level, + 
panel->backlight.device->props.max_brightness); + } + + panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level); + panel->backlight.enabled = true; + if (panel->backlight.device) + panel->backlight.device->props.power = FB_BLANK_UNBLANK; +} + +void intel_backlight_enable(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; + + if (!panel->backlight.present) + return; + + drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe)); + + mutex_lock(&dev_priv->backlight_lock); + + __intel_backlight_enable(crtc_state, conn_state); + + mutex_unlock(&dev_priv->backlight_lock); +} + +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +static u32 intel_panel_get_backlight(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 val = 0; + + mutex_lock(&dev_priv->backlight_lock); + + if (panel->backlight.enabled) + val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector)); + + mutex_unlock(&dev_priv->backlight_lock); + + drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val); + return val; +} + +/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */ +static u32 scale_user_to_hw(struct intel_connector *connector, + u32 user_level, u32 user_max) +{ + struct intel_panel *panel = &connector->panel; + + return scale(user_level, 0, user_max, + panel->backlight.min, panel->backlight.max); +} + +/* set backlight brightness to level in range [0..max], scaling wrt hw min */ +static void intel_panel_set_backlight(const struct drm_connector_state *conn_state, + u32 user_level, u32 user_max) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 hw_level; + + if (!panel->backlight.present) + return; + + mutex_lock(&dev_priv->backlight_lock); + + drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); + + hw_level = scale_user_to_hw(connector, user_level, user_max); + panel->backlight.level = hw_level; + + if (panel->backlight.enabled) + intel_panel_actually_set_backlight(conn_state, hw_level); + + mutex_unlock(&dev_priv->backlight_lock); +} + +static int intel_backlight_device_update_status(struct backlight_device *bd) +{ + struct intel_connector *connector = bl_get_data(bd); + struct intel_panel *panel = &connector->panel; + struct drm_device *dev = connector->base.dev; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", + bd->props.brightness, bd->props.max_brightness); + intel_panel_set_backlight(connector->base.state, bd->props.brightness, + bd->props.max_brightness); + + /* + * Allow flipping bl_power as a sub-state of enabled. Sadly the + * backlight class device does not make it easy to differentiate + * between callbacks for brightness and bl_power, so our backlight_power + * callback needs to take this into account. 
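+ * E.g. turning the panel off via bl_power (FB_BLANK_POWERDOWN) must not + * clobber the stored brightness, so the level can be restored when + * bl_power is set back to FB_BLANK_UNBLANK.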
+ */ + if (panel->backlight.enabled) { + if (panel->backlight.power) { + bool enable = bd->props.power == FB_BLANK_UNBLANK && + bd->props.brightness != 0; + panel->backlight.power(connector, enable); + } + } else { + bd->props.power = FB_BLANK_POWERDOWN; + } + + drm_modeset_unlock(&dev->mode_config.connection_mutex); + return 0; +} + +static int intel_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct intel_connector *connector = bl_get_data(bd); + struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + intel_wakeref_t wakeref; + int ret = 0; + + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { + u32 hw_level; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + hw_level = intel_panel_get_backlight(connector); + ret = scale_hw_to_user(connector, + hw_level, bd->props.max_brightness); + + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } + + return ret; +} + +static const struct backlight_ops intel_backlight_device_ops = { + .update_status = intel_backlight_device_update_status, + .get_brightness = intel_backlight_device_get_brightness, +}; + +int intel_backlight_device_register(struct intel_connector *connector) +{ + struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + struct backlight_properties props; + struct backlight_device *bd; + const char *name; + int ret = 0; + + if (WARN_ON(panel->backlight.device)) + return -ENODEV; + + if (!panel->backlight.present) + return 0; + + WARN_ON(panel->backlight.max == 0); + + memset(&props, 0, sizeof(props)); + props.type = BACKLIGHT_RAW; + + /* + * Note: Everything should work even if the backlight device max + * presented to the userspace is arbitrarily chosen. + */ + props.max_brightness = panel->backlight.max; + props.brightness = scale_hw_to_user(connector, + panel->backlight.level, + props.max_brightness); + + if (panel->backlight.enabled) + props.power = FB_BLANK_UNBLANK; + else + props.power = FB_BLANK_POWERDOWN; + + name = kstrdup("intel_backlight", GFP_KERNEL); + if (!name) + return -ENOMEM; + + bd = backlight_device_register(name, connector->base.kdev, connector, + &intel_backlight_device_ops, &props); + + /* + * Using the same name independent of the drm device or connector + * prevents registration of multiple backlight devices in the + * driver. However, we need to use the default name for backward + * compatibility. Use unique names for subsequent backlight devices as a + * fallback when the default name already exists. 
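+ * E.g. a second panel would end up with a name along the lines of + * "card0-eDP-2-backlight".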
+ */ + if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) { + kfree(name); + name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", + i915->drm.primary->index, connector->base.name); + if (!name) + return -ENOMEM; + + bd = backlight_device_register(name, connector->base.kdev, connector, + &intel_backlight_device_ops, &props); + } + + if (IS_ERR(bd)) { + drm_err(&i915->drm, + "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n", + connector->base.base.id, connector->base.name, name, PTR_ERR(bd)); + ret = PTR_ERR(bd); + goto out; + } + + panel->backlight.device = bd; + + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] backlight device %s registered\n", + connector->base.base.id, connector->base.name, name); + +out: + kfree(name); + + return ret; +} + +void intel_backlight_device_unregister(struct intel_connector *connector) +{ + struct intel_panel *panel = &connector->panel; + + if (panel->backlight.device) { + backlight_device_unregister(panel->backlight.device); + panel->backlight.device = NULL; + } +} +#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ + +/* + * CNP: PWM clock frequency is 19.2 MHz or 24 MHz. + * PWM increment = 1 + */ +static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + + return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq), + pwm_freq_hz); +} + +/* + * BXT: PWM clock frequency = 19.2 MHz. + */ +static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz); +} + +/* + * SPT: This value represents the period of the PWM stream in clock periods + * multiplied by 16 (default increment) or 128 (alternate increment selected in + * SCHICKEN_1 bit 0). PWM clock is 24 MHz. + */ +static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + struct intel_panel *panel = &connector->panel; + u32 mul; + + if (panel->backlight.alternate_pwm_increment) + mul = 128; + else + mul = 16; + + return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul); +} + +/* + * LPT: This value represents the period of the PWM stream in clock periods + * multiplied by 128 (default increment) or 16 (alternate increment, selected in + * LPT SOUTH_CHICKEN2 register bit 5). + */ +static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 mul, clock; + + if (panel->backlight.alternate_pwm_increment) + mul = 16; + else + mul = 128; + + if (HAS_PCH_LPT_H(dev_priv)) + clock = MHz(135); /* LPT:H */ + else + clock = MHz(24); /* LPT:LP */ + + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); +} + +/* + * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH + * display raw clocks multiplied by 128. + */ +static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + + return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq), + pwm_freq_hz * 128); +} + +/* + * Gen2: This field determines the number of time base events (display core + * clock frequency/32) in total for a complete cycle of modulated backlight + * control. + * + * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock) + * divided by 32. 
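+ * + * For example, a 200 MHz core clock and the 200 Hz fallback PWM frequency + * give 200000000 / (200 * 32) = 31250 time base events per complete cycle.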
+ */ +static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + int clock; + + if (IS_PINEVIEW(dev_priv)) + clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); + else + clock = KHz(dev_priv->cdclk.hw.cdclk); + + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32); +} + +/* + * Gen4: This value represents the period of the PWM stream in display core + * clocks ([DevCTG] HRAW clocks) multiplied by 128. + * + */ +static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + int clock; + + if (IS_G4X(dev_priv)) + clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); + else + clock = KHz(dev_priv->cdclk.hw.cdclk); + + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128); +} + +/* + * VLV: This value represents the period of the PWM stream in display core + * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks + * multiplied by 16. CHV uses a 19.2MHz S0IX clock. + */ +static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + int mul, clock; + + if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) { + if (IS_CHERRYVIEW(dev_priv)) + clock = KHz(19200); + else + clock = MHz(25); + mul = 16; + } else { + clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); + mul = 128; + } + + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); +} + +static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv) +{ + u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; + + if (pwm_freq_hz) { + drm_dbg_kms(&dev_priv->drm, + "VBT defined backlight frequency %u Hz\n", + pwm_freq_hz); + } else { + pwm_freq_hz = 200; + drm_dbg_kms(&dev_priv->drm, + "default backlight frequency %u Hz\n", + pwm_freq_hz); + } + + return pwm_freq_hz; +} + +static u32 get_backlight_max_vbt(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv); + u32 pwm; + + if (!panel->backlight.pwm_funcs->hz_to_pwm) { + drm_dbg_kms(&dev_priv->drm, + "backlight frequency conversion not supported\n"); + return 0; + } + + pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz); + if (!pwm) { + drm_dbg_kms(&dev_priv->drm, + "backlight frequency conversion failed\n"); + return 0; + } + + return pwm; +} + +/* + * Note: The setup hooks can't assume pipe is set! + */ +static u32 get_backlight_min_vbt(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + int min; + + drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0); + + /* + * XXX: If the vbt value is 255, it makes min equal to max, which leads + * to problems. There are such machines out there. Either our + * interpretation is wrong or the vbt has bogus data. Or both. Safeguard + * against this by letting the minimum be at most (arbitrarily chosen) + * 25% of the max. 
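+ * 64 out of 255 is just over 25%, hence the clamp to 64 below.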
+ */ + min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64); + if (min != dev_priv->vbt.backlight.min_brightness) { + drm_dbg_kms(&dev_priv->drm, + "clamping VBT min backlight %d/255 to %d/255\n", + dev_priv->vbt.backlight.min_brightness, min); + } + + /* vbt value is a coefficient in range [0..255] */ + return scale(min, 0, 255, 0, panel->backlight.pwm_level_max); +} + +static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; + bool alt, cpu_mode; + + if (HAS_PCH_LPT(dev_priv)) + alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY; + else + alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY; + panel->backlight.alternate_pwm_increment = alt; + + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; + + pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2); + panel->backlight.pwm_level_max = pch_ctl2 >> 16; + + cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); + + if (!panel->backlight.pwm_level_max) + panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); + + if (!panel->backlight.pwm_level_max) + return -ENODEV; + + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE; + + cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) && + !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) && + (cpu_ctl2 & BLM_PWM_ENABLE); + + if (cpu_mode) { + val = pch_get_backlight(connector, unused); + + drm_dbg_kms(&dev_priv->drm, + "CPU backlight register was enabled, switching to PCH override\n"); + + /* Write converted CPU PWM value to PCH override register */ + lpt_set_backlight(connector->base.state, val); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, + pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); + + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, + cpu_ctl2 & ~BLM_PWM_ENABLE); + } + + return 0; +} + +static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 cpu_ctl2, pch_ctl1, pch_ctl2; + + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; + + pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2); + panel->backlight.pwm_level_max = pch_ctl2 >> 16; + + if (!panel->backlight.pwm_level_max) + panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); + + if (!panel->backlight.pwm_level_max) + return -ENODEV; + + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); + panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) && + (pch_ctl1 & BLM_PCH_PWM_ENABLE); + + return 0; +} + +static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 ctl, val; + + ctl = intel_de_read(dev_priv, BLC_PWM_CTL); + + if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) + panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; + + if (IS_PINEVIEW(dev_priv)) + panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; + + panel->backlight.pwm_level_max = ctl >> 17; + + if 
(!panel->backlight.pwm_level_max) { + panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); + panel->backlight.pwm_level_max >>= 1; + } + + if (!panel->backlight.pwm_level_max) + return -ENODEV; + + if (panel->backlight.combination_mode) + panel->backlight.pwm_level_max *= 0xff; + + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + val = i9xx_get_backlight(connector, unused); + val = intel_backlight_invert_pwm_level(connector, val); + val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max); + + panel->backlight.pwm_enabled = val != 0; + + return 0; +} + +static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 ctl, ctl2; + + ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2); + panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE; + panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; + + ctl = intel_de_read(dev_priv, BLC_PWM_CTL); + panel->backlight.pwm_level_max = ctl >> 16; + + if (!panel->backlight.pwm_level_max) + panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); + + if (!panel->backlight.pwm_level_max) + return -ENODEV; + + if (panel->backlight.combination_mode) + panel->backlight.pwm_level_max *= 0xff; + + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; + + return 0; +} + +static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 ctl, ctl2; + + if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) + return -ENODEV; + + ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); + panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; + + ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)); + panel->backlight.pwm_level_max = ctl >> 16; + + if (!panel->backlight.pwm_level_max) + panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); + + if (!panel->backlight.pwm_level_max) + return -ENODEV; + + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; + + return 0; +} + +static int +bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 pwm_ctl, val; + + panel->backlight.controller = dev_priv->vbt.backlight.controller; + + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + + /* Controller 1 uses the utility pin. 
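+ * Read the pin polarity back here so that the enable path can reprogram + * it consistently later.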
*/ + if (panel->backlight.controller == 1) { + val = intel_de_read(dev_priv, UTIL_PIN_CTL); + panel->backlight.util_pin_active_low = + val & UTIL_PIN_POLARITY; + } + + panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; + panel->backlight.pwm_level_max = + intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller)); + + if (!panel->backlight.pwm_level_max) + panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); + + if (!panel->backlight.pwm_level_max) + return -ENODEV; + + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; + + return 0; +} + +static int +cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + u32 pwm_ctl; + + /* + * CNP has the BXT implementation of backlight, but with only one + * controller. TODO: ICP has multiple controllers but we only use + * controller 0 for now. + */ + panel->backlight.controller = 0; + + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + + panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; + panel->backlight.pwm_level_max = + intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller)); + + if (!panel->backlight.pwm_level_max) + panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); + + if (!panel->backlight.pwm_level_max) + return -ENODEV; + + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; + + return 0; +} + +static int ext_pwm_setup_backlight(struct intel_connector *connector, + enum pipe pipe) +{ + struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_panel *panel = &connector->panel; + const char *desc; + u32 level; + + /* Get the right PWM chip for DSI backlight according to VBT */ + if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { + panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight"); + desc = "PMIC"; + } else { + panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight"); + desc = "SoC"; + } + + if (IS_ERR(panel->backlight.pwm)) { + drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n", + desc); + panel->backlight.pwm = NULL; + return -ENODEV; + } + + panel->backlight.pwm_level_max = 100; /* 100% */ + panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); + + if (pwm_is_enabled(panel->backlight.pwm)) { + /* PWM is already enabled, use existing settings */ + pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state); + + level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state, + 100); + level = intel_backlight_invert_pwm_level(connector, level); + panel->backlight.pwm_enabled = true; + + drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", + NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, + get_vbt_pwm_freq(dev_priv), level); + } else { + /* Set period from VBT frequency, leave other settings at 0. 
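+ * E.g. the 200 Hz fallback frequency yields a period of + * 1000000000 / 200 = 5000000 ns.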
*/ + panel->backlight.pwm_state.period = + NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv); + } + + drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n", + desc); + return 0; +} + +static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_panel *panel = &connector->panel; + + panel->backlight.pwm_funcs->set(conn_state, + intel_backlight_invert_pwm_level(connector, level)); +} + +static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe) +{ + struct intel_panel *panel = &connector->panel; + + return intel_backlight_invert_pwm_level(connector, + panel->backlight.pwm_funcs->get(connector, pipe)); +} + +static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_panel *panel = &connector->panel; + + panel->backlight.pwm_funcs->enable(crtc_state, conn_state, + intel_backlight_invert_pwm_level(connector, level)); +} + +static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_panel *panel = &connector->panel; + + panel->backlight.pwm_funcs->disable(conn_state, + intel_backlight_invert_pwm_level(connector, level)); +} + +static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) +{ + struct intel_panel *panel = &connector->panel; + int ret = panel->backlight.pwm_funcs->setup(connector, pipe); + + if (ret < 0) + return ret; + + panel->backlight.min = panel->backlight.pwm_level_min; + panel->backlight.max = panel->backlight.pwm_level_max; + panel->backlight.level = intel_pwm_get_backlight(connector, pipe); + panel->backlight.enabled = panel->backlight.pwm_enabled; + + return 0; +} + +void intel_backlight_update(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + + if (!panel->backlight.present) + return; + + mutex_lock(&dev_priv->backlight_lock); + if (!panel->backlight.enabled) + __intel_backlight_enable(crtc_state, conn_state); + + mutex_unlock(&dev_priv->backlight_lock); +} + +int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_panel *panel = &connector->panel; + int ret; + + if (!dev_priv->vbt.backlight.present) { + if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { + drm_dbg_kms(&dev_priv->drm, + "no backlight present per VBT, but present per quirk\n"); + } else { + drm_dbg_kms(&dev_priv->drm, + "no backlight present per VBT\n"); + return 0; + } + } + + /* ensure intel_panel has been initialized first */ + if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs)) + return -ENODEV; + + /* set level and max in panel struct */ + mutex_lock(&dev_priv->backlight_lock); + ret = panel->backlight.funcs->setup(connector, pipe); + mutex_unlock(&dev_priv->backlight_lock); + + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "failed to setup backlight for connector %s\n", + 
connector->base.name); + return ret; + } + + panel->backlight.present = true; + + drm_dbg_kms(&dev_priv->drm, + "Connector %s backlight initialized, %s, brightness %u/%u\n", + connector->base.name, + enableddisabled(panel->backlight.enabled), + panel->backlight.level, panel->backlight.max); + + return 0; +} + +void intel_backlight_destroy(struct intel_panel *panel) +{ + /* dispose of the pwm */ + if (panel->backlight.pwm) + pwm_put(panel->backlight.pwm); + + panel->backlight.present = false; +} + +static const struct intel_panel_bl_funcs bxt_pwm_funcs = { + .setup = bxt_setup_backlight, + .enable = bxt_enable_backlight, + .disable = bxt_disable_backlight, + .set = bxt_set_backlight, + .get = bxt_get_backlight, + .hz_to_pwm = bxt_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs cnp_pwm_funcs = { + .setup = cnp_setup_backlight, + .enable = cnp_enable_backlight, + .disable = cnp_disable_backlight, + .set = bxt_set_backlight, + .get = bxt_get_backlight, + .hz_to_pwm = cnp_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs lpt_pwm_funcs = { + .setup = lpt_setup_backlight, + .enable = lpt_enable_backlight, + .disable = lpt_disable_backlight, + .set = lpt_set_backlight, + .get = lpt_get_backlight, + .hz_to_pwm = lpt_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs spt_pwm_funcs = { + .setup = lpt_setup_backlight, + .enable = lpt_enable_backlight, + .disable = lpt_disable_backlight, + .set = lpt_set_backlight, + .get = lpt_get_backlight, + .hz_to_pwm = spt_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs pch_pwm_funcs = { + .setup = pch_setup_backlight, + .enable = pch_enable_backlight, + .disable = pch_disable_backlight, + .set = pch_set_backlight, + .get = pch_get_backlight, + .hz_to_pwm = pch_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs ext_pwm_funcs = { + .setup = ext_pwm_setup_backlight, + .enable = ext_pwm_enable_backlight, + .disable = ext_pwm_disable_backlight, + .set = ext_pwm_set_backlight, + .get = ext_pwm_get_backlight, +}; + +static const struct intel_panel_bl_funcs vlv_pwm_funcs = { + .setup = vlv_setup_backlight, + .enable = vlv_enable_backlight, + .disable = vlv_disable_backlight, + .set = vlv_set_backlight, + .get = vlv_get_backlight, + .hz_to_pwm = vlv_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs i965_pwm_funcs = { + .setup = i965_setup_backlight, + .enable = i965_enable_backlight, + .disable = i965_disable_backlight, + .set = i9xx_set_backlight, + .get = i9xx_get_backlight, + .hz_to_pwm = i965_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs i9xx_pwm_funcs = { + .setup = i9xx_setup_backlight, + .enable = i9xx_enable_backlight, + .disable = i9xx_disable_backlight, + .set = i9xx_set_backlight, + .get = i9xx_get_backlight, + .hz_to_pwm = i9xx_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs pwm_bl_funcs = { + .setup = intel_pwm_setup_backlight, + .enable = intel_pwm_enable_backlight, + .disable = intel_pwm_disable_backlight, + .set = intel_pwm_set_backlight, + .get = intel_pwm_get_backlight, +}; + +/* Set up chip specific backlight functions */ +void intel_backlight_init_funcs(struct intel_panel *panel) +{ + struct intel_connector *connector = + container_of(panel, struct intel_connector, panel); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI && + intel_dsi_dcs_init_backlight_funcs(connector) == 0) + return; + + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + panel->backlight.pwm_funcs = &bxt_pwm_funcs; 
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) { + panel->backlight.pwm_funcs = &cnp_pwm_funcs; + } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) { + if (HAS_PCH_LPT(dev_priv)) + panel->backlight.pwm_funcs = &lpt_pwm_funcs; + else + panel->backlight.pwm_funcs = &spt_pwm_funcs; + } else if (HAS_PCH_SPLIT(dev_priv)) { + panel->backlight.pwm_funcs = &pch_pwm_funcs; + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) { + panel->backlight.pwm_funcs = &ext_pwm_funcs; + } else { + panel->backlight.pwm_funcs = &vlv_pwm_funcs; + } + } else if (DISPLAY_VER(dev_priv) == 4) { + panel->backlight.pwm_funcs = &i965_pwm_funcs; + } else { + panel->backlight.pwm_funcs = &i9xx_pwm_funcs; + } + + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && + intel_dp_aux_init_backlight_funcs(connector) == 0) + return; + + /* We're using a standard PWM backlight interface */ + panel->backlight.funcs = &pwm_bl_funcs; +} diff --git a/drivers/gpu/drm/i915/display/intel_backlight.h b/drivers/gpu/drm/i915/display/intel_backlight.h new file mode 100644 index 000000000000..339643f63897 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_backlight.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_BACKLIGHT_H__ +#define __INTEL_BACKLIGHT_H__ + +#include <linux/types.h> + +struct drm_connector_state; +struct intel_atomic_state; +struct intel_connector; +struct intel_crtc_state; +struct intel_encoder; +struct intel_panel; +enum pipe; + +void intel_backlight_init_funcs(struct intel_panel *panel); +int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe); +void intel_backlight_destroy(struct intel_panel *panel); + +void intel_backlight_enable(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +void intel_backlight_update(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +void intel_backlight_disable(const struct drm_connector_state *old_conn_state); + +void intel_backlight_set_acpi(const struct drm_connector_state *conn_state, + u32 level, u32 max); +void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, + u32 level); +u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 level); +u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 level); +u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val); + +#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) +int intel_backlight_device_register(struct intel_connector *connector); +void intel_backlight_device_unregister(struct intel_connector *connector); +#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ +static inline int intel_backlight_device_register(struct intel_connector *connector) +{ + return 0; +} +static inline void intel_backlight_device_unregister(struct intel_connector *connector) +{ +} +#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ + +#endif /* __INTEL_BACKLIGHT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index e86e6ed2d3bf..9d989c9f5da4 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -451,13 +451,23 @@ parse_lfp_backlight(struct drm_i915_private *i915, } i915->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; - if (bdb->version >= 191 && - 
get_blocksize(backlight_data) >= sizeof(*backlight_data)) { - const struct lfp_backlight_control_method *method; + if (bdb->version >= 191) { + size_t exp_size; - method = &backlight_data->backlight_control[panel_type]; - i915->vbt.backlight.type = method->type; - i915->vbt.backlight.controller = method->controller; + if (bdb->version >= 236) + exp_size = sizeof(struct bdb_lfp_backlight_data); + else if (bdb->version >= 234) + exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234; + else + exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191; + + if (get_blocksize(backlight_data) >= exp_size) { + const struct lfp_backlight_control_method *method; + + method = &backlight_data->backlight_control[panel_type]; + i915->vbt.backlight.type = method->type; + i915->vbt.backlight.controller = method->controller; + } } i915->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; @@ -483,6 +493,9 @@ parse_lfp_backlight(struct drm_i915_private *i915, level = 255; } i915->vbt.backlight.min_brightness = min_level; + + i915->vbt.backlight.brightness_precision_bits = + backlight_data->brightness_precision_bits[panel_type]; } else { level = backlight_data->level[panel_type]; i915->vbt.backlight.min_brightness = entry->min_brightness; @@ -1501,39 +1514,142 @@ static u8 translate_iboost(u8 val) return mapping[val]; } +static const u8 cnp_ddc_pin_map[] = { + [0] = 0, /* N/A */ + [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT, + [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT, + [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */ + [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */ +}; + +static const u8 icp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT, + [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP, + [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP, + [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP, + [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP, + [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP, + [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP, +}; + +static const u8 rkl_pch_tgp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP, + [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP, +}; + +static const u8 adls_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, + [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, + [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, + [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, +}; + +static const u8 gen9bc_tgp_ddc_pin_map[] = { + [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP, + [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP, +}; + +static const u8 adlp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [ADLP_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, + [ADLP_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, + [ADLP_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, + [ADLP_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, +}; + +static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) +{ + const u8 *ddc_pin_map; + int n_entries; + + if (IS_ALDERLAKE_P(i915)) { + ddc_pin_map = adlp_ddc_pin_map; + n_entries = ARRAY_SIZE(adlp_ddc_pin_map); + } else if (IS_ALDERLAKE_S(i915)) { + ddc_pin_map = adls_ddc_pin_map; + n_entries = ARRAY_SIZE(adls_ddc_pin_map); + } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { + return vbt_pin; + } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) { + ddc_pin_map = rkl_pch_tgp_ddc_pin_map; + n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); + 
} else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) { + ddc_pin_map = gen9bc_tgp_ddc_pin_map; + n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map); + } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { + ddc_pin_map = icp_ddc_pin_map; + n_entries = ARRAY_SIZE(icp_ddc_pin_map); + } else if (HAS_PCH_CNP(i915)) { + ddc_pin_map = cnp_ddc_pin_map; + n_entries = ARRAY_SIZE(cnp_ddc_pin_map); + } else { + /* Assuming direct map */ + return vbt_pin; + } + + if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0) + return ddc_pin_map[vbt_pin]; + + drm_dbg_kms(&i915->drm, + "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", + vbt_pin); + return 0; +} + static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) { - const struct ddi_vbt_port_info *info; + const struct intel_bios_encoder_data *devdata; enum port port; if (!ddc_pin) return PORT_NONE; for_each_port(port) { - info = &i915->vbt.ddi_port_info[port]; + devdata = i915->vbt.ports[port]; - if (info->devdata && ddc_pin == info->alternate_ddc_pin) + if (devdata && ddc_pin == devdata->child.ddc_pin) return port; } return PORT_NONE; } -static void sanitize_ddc_pin(struct drm_i915_private *i915, +static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata, enum port port) { - struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port]; + struct drm_i915_private *i915 = devdata->i915; struct child_device_config *child; + u8 mapped_ddc_pin; enum port p; - p = get_port_by_ddc_pin(i915, info->alternate_ddc_pin); + if (!devdata->child.ddc_pin) + return; + + mapped_ddc_pin = map_ddc_pin(i915, devdata->child.ddc_pin); + if (!intel_gmbus_is_valid_pin(i915, mapped_ddc_pin)) { + drm_dbg_kms(&i915->drm, + "Port %c has invalid DDC pin %d, " + "sticking to defaults\n", + port_name(port), mapped_ddc_pin); + devdata->child.ddc_pin = 0; + return; + } + + p = get_port_by_ddc_pin(i915, devdata->child.ddc_pin); if (p == PORT_NONE) return; drm_dbg_kms(&i915->drm, "port %c trying to use the same DDC pin (0x%x) as port %c, " "disabling port %c DVI/HDMI support\n", - port_name(port), info->alternate_ddc_pin, + port_name(port), mapped_ddc_pin, port_name(p), port_name(p)); /* @@ -1545,48 +1661,47 @@ static void sanitize_ddc_pin(struct drm_i915_private *i915, * there are real machines (eg. 
Asrock B250M-HDV) where VBT has both * port A and port E with the same AUX ch and we must pick port E :( */ - info = &i915->vbt.ddi_port_info[p]; - child = &info->devdata->child; + child = &i915->vbt.ports[p]->child; child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING; child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT; - info->alternate_ddc_pin = 0; + child->ddc_pin = 0; } static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) { - const struct ddi_vbt_port_info *info; + const struct intel_bios_encoder_data *devdata; enum port port; if (!aux_ch) return PORT_NONE; for_each_port(port) { - info = &i915->vbt.ddi_port_info[port]; + devdata = i915->vbt.ports[port]; - if (info->devdata && aux_ch == info->alternate_aux_channel) + if (devdata && aux_ch == devdata->child.aux_channel) return port; } return PORT_NONE; } -static void sanitize_aux_ch(struct drm_i915_private *i915, +static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata, enum port port) { - struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port]; + struct drm_i915_private *i915 = devdata->i915; struct child_device_config *child; enum port p; - p = get_port_by_aux_ch(i915, info->alternate_aux_channel); + p = get_port_by_aux_ch(i915, devdata->child.aux_channel); if (p == PORT_NONE) return; drm_dbg_kms(&i915->drm, "port %c trying to use the same AUX CH (0x%x) as port %c, " "disabling port %c DP support\n", - port_name(port), info->alternate_aux_channel, + port_name(port), devdata->child.aux_channel, port_name(p), port_name(p)); /* @@ -1598,88 +1713,43 @@ static void sanitize_aux_ch(struct drm_i915_private *i915, * there are real machines (eg. Asrock B250M-HDV) where VBT has both * port A and port E with the same AUX ch and we must pick port E :( */ - info = &i915->vbt.ddi_port_info[p]; - child = &info->devdata->child; + child = &i915->vbt.ports[p]->child; child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT; - info->alternate_aux_channel = 0; -} - -static const u8 cnp_ddc_pin_map[] = { - [0] = 0, /* N/A */ - [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT, - [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT, - [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */ - [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */ -}; - -static const u8 icp_ddc_pin_map[] = { - [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, - [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, - [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT, - [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP, - [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP, - [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP, - [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP, - [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP, - [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP, -}; - -static const u8 rkl_pch_tgp_ddc_pin_map[] = { - [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, - [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, - [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP, - [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP, -}; - -static const u8 adls_ddc_pin_map[] = { - [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, - [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP, - [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP, - [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP, - [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP, -}; - -static const u8 gen9bc_tgp_ddc_pin_map[] = { - [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, - [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP, - [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP, -}; - -static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) -{ - const u8 *ddc_pin_map; - int n_entries; - - if (IS_ALDERLAKE_S(i915)) { - ddc_pin_map = adls_ddc_pin_map; - n_entries = 
ARRAY_SIZE(adls_ddc_pin_map); - } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) { - return vbt_pin; - } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) { - ddc_pin_map = rkl_pch_tgp_ddc_pin_map; - n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); - } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) { - ddc_pin_map = gen9bc_tgp_ddc_pin_map; - n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map); - } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) { - ddc_pin_map = icp_ddc_pin_map; - n_entries = ARRAY_SIZE(icp_ddc_pin_map); - } else if (HAS_PCH_CNP(i915)) { - ddc_pin_map = cnp_ddc_pin_map; - n_entries = ARRAY_SIZE(cnp_ddc_pin_map); - } else { - /* Assuming direct map */ - return vbt_pin; + child->aux_channel = 0; +} + +static u8 dvo_port_type(u8 dvo_port) +{ + switch (dvo_port) { + case DVO_PORT_HDMIA: + case DVO_PORT_HDMIB: + case DVO_PORT_HDMIC: + case DVO_PORT_HDMID: + case DVO_PORT_HDMIE: + case DVO_PORT_HDMIF: + case DVO_PORT_HDMIG: + case DVO_PORT_HDMIH: + case DVO_PORT_HDMII: + return DVO_PORT_HDMIA; + case DVO_PORT_DPA: + case DVO_PORT_DPB: + case DVO_PORT_DPC: + case DVO_PORT_DPD: + case DVO_PORT_DPE: + case DVO_PORT_DPF: + case DVO_PORT_DPG: + case DVO_PORT_DPH: + case DVO_PORT_DPI: + return DVO_PORT_DPA; + case DVO_PORT_MIPIA: + case DVO_PORT_MIPIB: + case DVO_PORT_MIPIC: + case DVO_PORT_MIPID: + return DVO_PORT_MIPIA; + default: + return dvo_port; } - - if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0) - return ddc_pin_map[vbt_pin]; - - drm_dbg_kms(&i915->drm, - "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", - vbt_pin); - return 0; } static enum port __dvo_port_to_port(int n_ports, int n_dvo, @@ -1815,6 +1885,17 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) } } +static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) +{ + if (!devdata || devdata->i915->vbt.version < 216) + return 0; + + if (devdata->i915->vbt.version >= 230) + return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate); + else + return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); +} + static void sanitize_device_type(struct intel_bios_encoder_data *devdata, enum port port) { @@ -1868,6 +1949,32 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR; } +static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) +{ + if (!devdata || devdata->i915->vbt.version < 158) + return -1; + + return devdata->child.hdmi_level_shifter_value; +} + +static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata) +{ + if (!devdata || devdata->i915->vbt.version < 204) + return 0; + + switch (devdata->child.hdmi_max_data_rate) { + default: + MISSING_CASE(devdata->child.hdmi_max_data_rate); + fallthrough; + case HDMI_MAX_DATA_RATE_PLATFORM: + return 0; + case HDMI_MAX_DATA_RATE_297: + return 297000; + case HDMI_MAX_DATA_RATE_165: + return 165000; + } +} + static bool is_port_valid(struct drm_i915_private *i915, enum port port) { /* @@ -1885,9 +1992,8 @@ static void parse_ddi_port(struct drm_i915_private *i915, struct intel_bios_encoder_data *devdata) { const struct child_device_config *child = &devdata->child; - struct ddi_vbt_port_info *info; bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt; - int dp_boost_level, hdmi_boost_level; + int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock; enum port 
port; port = dvo_port_to_port(i915, child->dvo_port); @@ -1901,9 +2007,7 @@ static void parse_ddi_port(struct drm_i915_private *i915, return; } - info = &i915->vbt.ddi_port_info[port]; - - if (info->devdata) { + if (i915->vbt.ports[port]) { drm_dbg_kms(&i915->drm, "More than one child device for port %c in VBT, using the first.\n", port_name(port)); @@ -1928,62 +2032,24 @@ static void parse_ddi_port(struct drm_i915_private *i915, supports_typec_usb, supports_tbt, devdata->dsc != NULL); - if (is_dvi) { - u8 ddc_pin; - - ddc_pin = map_ddc_pin(i915, child->ddc_pin); - if (intel_gmbus_is_valid_pin(i915, ddc_pin)) { - info->alternate_ddc_pin = ddc_pin; - sanitize_ddc_pin(i915, port); - } else { - drm_dbg_kms(&i915->drm, - "Port %c has invalid DDC pin %d, " - "sticking to defaults\n", - port_name(port), ddc_pin); - } - } - - if (is_dp) { - info->alternate_aux_channel = child->aux_channel; + if (is_dvi) + sanitize_ddc_pin(devdata, port); - sanitize_aux_ch(i915, port); - } + if (is_dp) + sanitize_aux_ch(devdata, port); - if (i915->vbt.version >= 158) { - /* The VBT HDMI level shift values match the table we have. */ - u8 hdmi_level_shift = child->hdmi_level_shifter_value; + hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata); + if (hdmi_level_shift >= 0) { drm_dbg_kms(&i915->drm, "Port %c VBT HDMI level shift: %d\n", - port_name(port), - hdmi_level_shift); - info->hdmi_level_shift = hdmi_level_shift; - info->hdmi_level_shift_set = true; + port_name(port), hdmi_level_shift); } - if (i915->vbt.version >= 204) { - int max_tmds_clock; - - switch (child->hdmi_max_data_rate) { - default: - MISSING_CASE(child->hdmi_max_data_rate); - fallthrough; - case HDMI_MAX_DATA_RATE_PLATFORM: - max_tmds_clock = 0; - break; - case HDMI_MAX_DATA_RATE_297: - max_tmds_clock = 297000; - break; - case HDMI_MAX_DATA_RATE_165: - max_tmds_clock = 165000; - break; - } - - if (max_tmds_clock) - drm_dbg_kms(&i915->drm, - "Port %c VBT HDMI max TMDS clock: %d kHz\n", - port_name(port), max_tmds_clock); - info->max_tmds_clock = max_tmds_clock; - } + max_tmds_clock = _intel_bios_max_tmds_clock(devdata); + if (max_tmds_clock) + drm_dbg_kms(&i915->drm, + "Port %c VBT HDMI max TMDS clock: %d kHz\n", + port_name(port), max_tmds_clock); /* I_boost config for SKL and above */ dp_boost_level = intel_bios_encoder_dp_boost_level(devdata); @@ -1998,19 +2064,13 @@ static void parse_ddi_port(struct drm_i915_private *i915, "Port %c VBT HDMI boost level: %d\n", port_name(port), hdmi_boost_level); - /* DP max link rate for GLK+ */ - if (i915->vbt.version >= 216) { - if (i915->vbt.version >= 230) - info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate); - else - info->dp_max_link_rate = parse_bdb_216_dp_max_link_rate(child->dp_max_link_rate); - + dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata); + if (dp_max_link_rate) drm_dbg_kms(&i915->drm, "Port %c VBT DP max link rate: %d\n", - port_name(port), info->dp_max_link_rate); - } + port_name(port), dp_max_link_rate); - info->devdata = devdata; + i915->vbt.ports[port] = devdata; } static void parse_ddi_ports(struct drm_i915_private *i915) @@ -2548,12 +2608,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, }; - if (HAS_DDI(i915)) { - const struct ddi_vbt_port_info *port_info = - &i915->vbt.ddi_port_info[port]; - - return port_info->devdata; - } + if (HAS_DDI(i915)) + return i915->vbt.ports[port]; /* FIXME maybe deal with port A as well? 
*/ if (drm_WARN_ON(&i915->drm, @@ -2612,35 +2668,17 @@ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) return false; } -static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, - enum port port) +static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) { - static const struct { - u16 dp, hdmi; - } port_mapping[] = { - /* - * Buggy VBTs may declare DP ports as having - * HDMI type dvo_port :( So let's check both. - */ - [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, - [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, - [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, - [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, - [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, - }; - - if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) - return false; - if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) return false; - if (child->dvo_port == port_mapping[port].dp) + if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA) return true; /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ - if (child->dvo_port == port_mapping[port].hdmi && + if (dvo_port_type(child->dvo_port) == DVO_PORT_HDMIA && child->aux_channel != 0) return true; @@ -2650,10 +2688,36 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, enum port port) { + static const struct { + u16 dp, hdmi; + } port_mapping[] = { + /* + * Buggy VBTs may declare DP ports as having + * HDMI type dvo_port :( So let's check both. + */ + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, + [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, + }; const struct intel_bios_encoder_data *devdata; + if (HAS_DDI(i915)) { + const struct intel_bios_encoder_data *devdata; + + devdata = intel_bios_encoder_data_lookup(i915, port); + + return devdata && child_dev_is_dp_dual_mode(&devdata->child); + } + + if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) + return false; + list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - if (child_dev_is_dp_dual_mode(&devdata->child, port)) + if ((devdata->child.dvo_port == port_mapping[port].dp || + devdata->child.dvo_port == port_mapping[port].hdmi) && + child_dev_is_dp_dual_mode(&devdata->child)) return true; } @@ -2804,8 +2868,7 @@ bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = - i915->vbt.ddi_port_info[port].devdata; + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEMINILAKE(i915) && !IS_BROXTON(i915))) @@ -2825,8 +2888,7 @@ bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = - i915->vbt.ddi_port_info[port].devdata; + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; return HAS_LSPCON(i915) && devdata && devdata->child.lspcon; } @@ -2842,8 +2904,7 @@ bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata = - i915->vbt.ddi_port_info[port].devdata; + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; return devdata && devdata->child.lane_reversal; } @@ -2851,11 +2912,10 @@ 
intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, enum port port) { - const struct ddi_vbt_port_info *info = - &i915->vbt.ddi_port_info[port]; + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port]; enum aux_ch aux_ch; - if (!info->alternate_aux_channel) { + if (!devdata || !devdata->child.aux_channel) { aux_ch = (enum aux_ch)port; drm_dbg_kms(&i915->drm, @@ -2871,7 +2931,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E * map to DDI A,TC1,TC2,TC3,TC4 respectively. */ - switch (info->alternate_aux_channel) { + switch (devdata->child.aux_channel) { case DP_AUX_A: aux_ch = AUX_CH_A; break; @@ -2932,7 +2992,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_I; break; default: - MISSING_CASE(info->alternate_aux_channel); + MISSING_CASE(devdata->child.aux_channel); aux_ch = AUX_CH_A; break; } @@ -2946,17 +3006,18 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, int intel_bios_max_tmds_clock(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; - return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock; + return _intel_bios_max_tmds_clock(devdata); } +/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ int intel_bios_hdmi_level_shift(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct ddi_vbt_port_info *info = - &i915->vbt.ddi_port_info[encoder->port]; + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; - return info->hdmi_level_shift_set ? 
info->hdmi_level_shift : -1; + return _intel_bios_hdmi_level_shift(devdata); } int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata) @@ -2978,15 +3039,20 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de int intel_bios_dp_max_link_rate(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; - return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate; + return _intel_bios_dp_max_link_rate(devdata); } int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port]; + + if (!devdata || !devdata->child.ddc_pin) + return 0; - return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin; + return map_ddc_pin(i915, devdata->child.ddc_pin); } bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) @@ -3002,5 +3068,5 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) { - return i915->vbt.ddi_port_info[port].devdata; + return i915->vbt.ports[port]; } diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index e91e0e0191fb..2da4aacc956b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -9,8 +9,8 @@ #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_display_types.h" +#include "intel_pcode.h" #include "intel_pm.h" -#include "intel_sideband.h" /* Parameters for Qclk Geyserville (QGV) */ struct intel_qgv_point { @@ -27,6 +27,9 @@ struct intel_qgv_info { u8 num_points; u8 num_psf_points; u8 t_bl; + u8 max_numchannels; + u8 channel_width; + u8 deinterleave; }; static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, @@ -42,7 +45,7 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */ else dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */ - sp->dclk = dclk_ratio * dclk_reference; + sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000); val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); if (val & DG1_GEAR_TYPE) @@ -69,6 +72,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, int point) { u32 val = 0, val2 = 0; + u16 dclk; int ret; ret = sandybridge_pcode_read(dev_priv, @@ -78,7 +82,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, if (ret) return ret; - sp->dclk = val & 0xffff; + dclk = val & 0xffff; + sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 
500 : 0), 1000); sp->t_rp = (val & 0xff0000) >> 16; sp->t_rcd = (val & 0xff000000) >> 24; @@ -133,7 +138,8 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, } static int icl_get_qgv_points(struct drm_i915_private *dev_priv, - struct intel_qgv_info *qi) + struct intel_qgv_info *qi, + bool is_y_tile) { const struct dram_info *dram_info = &dev_priv->dram_info; int i, ret; @@ -141,20 +147,44 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->num_points = dram_info->num_qgv_points; qi->num_psf_points = dram_info->num_psf_gv_points; - if (DISPLAY_VER(dev_priv) == 12) + if (DISPLAY_VER(dev_priv) >= 12) switch (dram_info->type) { case INTEL_DRAM_DDR4: - qi->t_bl = 4; + qi->t_bl = is_y_tile ? 8 : 4; + qi->max_numchannels = 2; + qi->channel_width = 64; + qi->deinterleave = is_y_tile ? 1 : 2; break; case INTEL_DRAM_DDR5: - qi->t_bl = 8; + qi->t_bl = is_y_tile ? 16 : 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = is_y_tile ? 1 : 2; + break; + case INTEL_DRAM_LPDDR4: + if (IS_ROCKETLAKE(dev_priv)) { + qi->t_bl = 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = 2; + break; + } + fallthrough; + case INTEL_DRAM_LPDDR5: + qi->t_bl = 16; + qi->max_numchannels = 8; + qi->channel_width = 16; + qi->deinterleave = is_y_tile ? 2 : 4; break; default: qi->t_bl = 16; + qi->max_numchannels = 1; break; } - else if (DISPLAY_VER(dev_priv) == 11) + else if (DISPLAY_VER(dev_priv) == 11) { qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8; + qi->max_numchannels = 1; + } if (drm_WARN_ON(&dev_priv->drm, qi->num_points > ARRAY_SIZE(qi->points))) @@ -193,12 +223,6 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, return 0; } -static int icl_calc_bw(int dclk, int num, int den) -{ - /* multiples of 16.666MHz (100/6) */ - return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6); -} - static int adl_calc_psf_bw(int clk) { /* @@ -222,31 +246,42 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi) struct intel_sa_info { u16 displayrtids; - u8 deburst, deprogbwlimit; + u8 deburst, deprogbwlimit, derating; }; static const struct intel_sa_info icl_sa_info = { .deburst = 8, .deprogbwlimit = 25, /* GB/s */ .displayrtids = 128, + .derating = 10, }; static const struct intel_sa_info tgl_sa_info = { .deburst = 16, .deprogbwlimit = 34, /* GB/s */ .displayrtids = 256, + .derating = 10, }; static const struct intel_sa_info rkl_sa_info = { - .deburst = 16, + .deburst = 8, .deprogbwlimit = 20, /* GB/s */ .displayrtids = 128, + .derating = 10, }; static const struct intel_sa_info adls_sa_info = { .deburst = 16, .deprogbwlimit = 38, /* GB/s */ .displayrtids = 256, + .derating = 10, +}; + +static const struct intel_sa_info adlp_sa_info = { + .deburst = 16, + .deprogbwlimit = 38, /* GB/s */ + .displayrtids = 256, + .derating = 20, }; static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) @@ -254,35 +289,130 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel struct intel_qgv_info qi = {}; bool is_y_tile = true; /* assume y tile may be used */ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); - int deinterleave; - int ipqdepth, ipqdepthpch; + int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; + int num_groups = ARRAY_SIZE(dev_priv->max_bw); int i, ret; - ret = icl_get_qgv_points(dev_priv, &qi); + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to get memory subsystem 
information, ignoring bandwidth limits"); return ret; } - deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); dclk_max = icl_sagv_max_dclk(&qi); + maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10); + ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); + qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); + + for (i = 0; i < num_groups; i++) { + struct intel_bw_info *bi = &dev_priv->max_bw[i]; + int clpchgroup; + int j; + + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; + bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + + bi->num_qgv_points = qi.num_points; + bi->num_psf_gv_points = qi.num_psf_points; + + for (j = 0; j < qi.num_points; j++) { + const struct intel_qgv_point *sp = &qi.points[j]; + int ct, bw; - ipqdepthpch = 16; + /* + * Max row cycle time + * + * FIXME what is the logic behind the + * assumed burst length? + */ + ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); + + bi->deratedbw[j] = min(maxdebw, + bw * (100 - sa->derating) / 100); + + drm_dbg_kms(&dev_priv->drm, + "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", + i, j, bi->num_planes, bi->deratedbw[j]); + } + } + /* + * If SAGV is disabled in BIOS, we always get 1 SAGV point, + * and we can't send PCode commands to restrict it, as they + * would fail and be pointless anyway. + */ + if (qi.num_points == 1) + dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; + else + dev_priv->sagv_status = I915_SAGV_ENABLED; + + return 0; +} + +static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) +{ + struct intel_qgv_info qi = {}; + const struct dram_info *dram_info = &dev_priv->dram_info; + bool is_y_tile = true; /* assume y tile may be used */ + int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); + int ipqdepth, ipqdepthpch = 16; + int dclk_max; + int maxdebw, peakbw; + int clperchgroup; + int num_groups = ARRAY_SIZE(dev_priv->max_bw); + int i, ret; + + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "Failed to get memory subsystem information, ignoring bandwidth limits"); + return ret; + } + + if (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5) + num_channels *= 2; + + qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ?
4 : 2); + + if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) + qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); + + if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels) + drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); + if (qi.max_numchannels != 0) + num_channels = min_t(u8, num_channels, qi.max_numchannels); + + dclk_max = icl_sagv_max_dclk(&qi); + + peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max; + maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */ - maxdebw = min(sa->deprogbwlimit * 1000, - icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); + /* + * clperchgroup = 4kpagespermempage * clperchperblock, + * clperchperblock = 8 / num_channels * interleave + */ + clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { + for (i = 0; i < num_groups; i++) { struct intel_bw_info *bi = &dev_priv->max_bw[i]; + struct intel_bw_info *bi_next; int clpchgroup; int j; - clpchgroup = (sa->deburst * deinterleave / num_channels) << i; - bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; + + /* bi_next only exists for the groups before the last one */ + if (i < num_groups - 1) { + bi_next = &dev_priv->max_bw[i + 1]; + + if (clpchgroup < clperchgroup) + bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + else + bi_next->num_planes = 0; + } bi->num_qgv_points = qi.num_points; bi->num_psf_gv_points = qi.num_psf_points; @@ -299,10 +429,10 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel */ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); - bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct); + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); bi->deratedbw[j] = min(maxdebw, - bw * 9 / 10); /* 90% */ + bw * (100 - sa->derating) / 100); drm_dbg_kms(&dev_priv->drm, "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", @@ -318,9 +448,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel "BW%d / PSF GV %d: num_planes=%d bw=%u\n", i, j, bi->num_planes, bi->psf_bw[j]); } - - if (bi->num_planes == 1) - break; } /* @@ -384,6 +511,34 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, return 0; } +static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, + int num_planes, int qgv_point) +{ + int i; + + /* + * Let's return max bw for 0 planes + */ + num_planes = max(1, num_planes); + + for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) { + const struct intel_bw_info *bi = + &dev_priv->max_bw[i]; + + /* + * Pcode will not expose all QGV points when + * SAGV is forced to off/min/med/max.
+ */ + if (qgv_point >= bi->num_qgv_points) + return UINT_MAX; + + if (num_planes <= bi->num_planes) + return bi->deratedbw[qgv_point]; + } + + return dev_priv->max_bw[0].deratedbw[qgv_point]; +} + static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, int psf_gv_point) { @@ -400,12 +555,14 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (IS_DG2(dev_priv)) dg2_get_bw_info(dev_priv); - else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) - icl_get_bw_info(dev_priv, &adls_sa_info); + else if (IS_ALDERLAKE_P(dev_priv)) + tgl_get_bw_info(dev_priv, &adlp_sa_info); + else if (IS_ALDERLAKE_S(dev_priv)) + tgl_get_bw_info(dev_priv, &adls_sa_info); else if (IS_ROCKETLAKE(dev_priv)) - icl_get_bw_info(dev_priv, &rkl_sa_info); + tgl_get_bw_info(dev_priv, &rkl_sa_info); else if (DISPLAY_VER(dev_priv) == 12) - icl_get_bw_info(dev_priv, &tgl_sa_info); + tgl_get_bw_info(dev_priv, &tgl_sa_info); else if (DISPLAY_VER(dev_priv) == 11) icl_get_bw_info(dev_priv, &icl_sa_info); } @@ -477,7 +634,7 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) data_rate += bw_state->data_rate[pipe]; - if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active()) + if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active(dev_priv)) data_rate = data_rate * 105 / 100; return data_rate; @@ -733,7 +890,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) for (i = 0; i < num_qgv_points; i++) { unsigned int max_data_rate; - max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); + if (DISPLAY_VER(dev_priv) > 11) + max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i); + else + max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); /* * We need to know which qgv point gives us * maximum bandwidth in order to disable SAGV diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 34fa4130d5c4..c30cf8d2b835 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -24,12 +24,16 @@ #include <linux/time.h> #include "intel_atomic.h" +#include "intel_atomic_plane.h" +#include "intel_audio.h" #include "intel_bw.h" #include "intel_cdclk.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_pcode.h" #include "intel_psr.h" -#include "intel_sideband.h" +#include "vlv_sideband.h" /** * DOC: CDCLK / RAWCLK @@ -59,6 +63,37 @@ * dividers can be programmed correctly. 
*/ +void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, + struct intel_cdclk_config *cdclk_config) +{ + dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config); +} + +static int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state); +} + +static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv, + const struct intel_cdclk_config *cdclk_config, + enum pipe pipe) +{ + dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe); +} + +static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_config) +{ + return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config); +} + +static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv, + int cdclk) +{ + return dev_priv->cdclk_funcs->calc_voltage_level(cdclk); +} + static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { @@ -1179,6 +1214,19 @@ static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } +static bool has_cdclk_squasher(struct drm_i915_private *i915) +{ + return IS_DG2(i915); +} + +struct intel_cdclk_vals { + u32 cdclk; + u16 refclk; + u16 waveform; + u8 divider; /* CD2X divider * 2 */ + u8 ratio; +}; + static const struct intel_cdclk_vals bxt_cdclk_table[] = { { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 }, { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 }, @@ -1280,12 +1328,19 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = { }; static const struct intel_cdclk_vals dg2_cdclk_table[] = { - { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 }, - { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, - { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, - { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 }, - { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, - { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, + { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 }, + { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 }, + { .refclk = 38400, .cdclk = 244800, .divider = 2, .ratio = 34, .waveform = 0xa4a4 }, + { .refclk = 38400, .cdclk = 285600, .divider = 2, .ratio = 34, .waveform = 0xa54a }, + { .refclk = 38400, .cdclk = 326400, .divider = 2, .ratio = 34, .waveform = 0xaaaa }, + { .refclk = 38400, .cdclk = 367200, .divider = 2, .ratio = 34, .waveform = 0xad5a }, + { .refclk = 38400, .cdclk = 408000, .divider = 2, .ratio = 34, .waveform = 0xb6b6 }, + { .refclk = 38400, .cdclk = 448800, .divider = 2, .ratio = 34, .waveform = 0xdbb6 }, + { .refclk = 38400, .cdclk = 489600, .divider = 2, .ratio = 34, .waveform = 0xeeee }, + { .refclk = 38400, .cdclk = 530400, .divider = 2, .ratio = 34, .waveform = 0xf7de }, + { .refclk = 38400, .cdclk = 571200, .divider = 2, .ratio = 34, .waveform = 0xfefe }, + { .refclk = 38400, .cdclk = 612000, .divider = 2, .ratio = 34, .waveform = 0xfffe }, + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0xffff }, {} }; @@ -1421,6 +1476,7 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, static void bxt_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { + u32 squash_ctl = 0; u32 divider; int div; @@ -1458,7 +1514,21 @@ 
static void bxt_get_cdclk(struct drm_i915_private *dev_priv, return; } - cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); + if (has_cdclk_squasher(dev_priv)) + squash_ctl = intel_de_read(dev_priv, CDCLK_SQUASH_CTL); + + if (squash_ctl & CDCLK_SQUASH_ENABLE) { + u16 waveform; + int size; + + size = REG_FIELD_GET(CDCLK_SQUASH_WINDOW_SIZE_MASK, squash_ctl) + 1; + waveform = REG_FIELD_GET(CDCLK_SQUASH_WAVEFORM_MASK, squash_ctl) >> (16 - size); + + cdclk_config->cdclk = DIV_ROUND_CLOSEST(hweight16(waveform) * + cdclk_config->vco, size * div); + } else { + cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); + } out: /* @@ -1466,7 +1536,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, * at least what the CDCLK frequency requires. */ cdclk_config->voltage_level = - dev_priv->display.calc_voltage_level(cdclk_config->cdclk); + intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk); } static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) @@ -1593,6 +1663,26 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv, } } +static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv, + int cdclk) +{ + const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + int i; + + if (cdclk == dev_priv->cdclk.hw.bypass) + return 0; + + for (i = 0; table[i].refclk; i++) + if (table[i].refclk == dev_priv->cdclk.hw.ref && + table[i].cdclk == cdclk) + return table[i].waveform; + + drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, dev_priv->cdclk.hw.ref); + + return 0xffff; +} + static void bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) @@ -1600,6 +1690,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 val; + u16 waveform; + int clock; int ret; /* Inform power controller of upcoming frequency change. 
*/ @@ -1643,7 +1735,24 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, bxt_de_pll_enable(dev_priv, vco); } - val = bxt_cdclk_cd2x_div_sel(dev_priv, cdclk, vco) | + waveform = cdclk_squash_waveform(dev_priv, cdclk); + + if (waveform) + clock = vco / 2; + else + clock = cdclk; + + if (has_cdclk_squasher(dev_priv)) { + u32 squash_ctl = 0; + + if (waveform) + squash_ctl = CDCLK_SQUASH_ENABLE | + CDCLK_SQUASH_WINDOW_SIZE(0xf) | waveform; + + intel_de_write(dev_priv, CDCLK_SQUASH_CTL, squash_ctl); + } + + val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) | bxt_cdclk_cd2x_pipe(dev_priv, pipe) | skl_cdclk_decimal(cdclk); @@ -1657,7 +1766,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, CDCLK_CTL, val); if (pipe != INVALID_PIPE) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); if (DISPLAY_VER(dev_priv) >= 11) { ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, @@ -1695,7 +1804,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; - int cdclk, vco; + int cdclk, clock, vco; intel_update_cdclk(dev_priv); intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); @@ -1731,8 +1840,12 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) expected = skl_cdclk_decimal(cdclk); /* Figure out what CD2X divider we should be using for this cdclk */ - expected |= bxt_cdclk_cd2x_div_sel(dev_priv, - dev_priv->cdclk.hw.cdclk, + if (has_cdclk_squasher(dev_priv)) + clock = dev_priv->cdclk.hw.vco / 2; + else + clock = dev_priv->cdclk.hw.cdclk; + + expected |= bxt_cdclk_cd2x_div_sel(dev_priv, clock, dev_priv->cdclk.hw.vco); /* @@ -1777,7 +1890,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0); cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk); cdclk_config.voltage_level = - dev_priv->display.calc_voltage_level(cdclk_config.cdclk); + intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } @@ -1789,7 +1902,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) cdclk_config.cdclk = cdclk_config.bypass; cdclk_config.vco = 0; cdclk_config.voltage_level = - dev_priv->display.calc_voltage_level(cdclk_config.cdclk); + intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk); bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } @@ -1848,6 +1961,25 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv, a->ref == b->ref; } +static bool intel_cdclk_can_squash(struct drm_i915_private *dev_priv, + const struct intel_cdclk_config *a, + const struct intel_cdclk_config *b) +{ + /* + * FIXME should store a bit more state in intel_cdclk_config + * to differentiate squasher vs. cd2x divider properly. For + * the moment all platforms with squasher use a fixed cd2x + * divider. 
+ */ + if (!has_cdclk_squasher(dev_priv)) + return false; + + return a->cdclk != b->cdclk && + a->vco != 0 && + a->vco == b->vco && + a->ref == b->ref; +} + /** * intel_cdclk_needs_modeset - Determine if changing between the CDCLK * configurations requires a modeset on all pipes @@ -1885,7 +2017,17 @@ static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, if (DISPLAY_VER(dev_priv) < 10 && !IS_BROXTON(dev_priv)) return false; + /* + * FIXME should store a bit more state in intel_cdclk_config + * to differentiate squasher vs. cd2x divider properly. For + * the moment all platforms with squasher use a fixed cd2x + * divider. + */ + if (has_cdclk_squasher(dev_priv)) + return false; + return a->cdclk != b->cdclk && + a->vco != 0 && a->vco == b->vco && a->ref == b->ref; } @@ -1932,7 +2074,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk)) return; intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to"); @@ -1943,6 +2085,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_psr_pause(intel_dp); } + intel_audio_cdclk_change_pre(dev_priv); + /* * Lock aux/gmbus while we change cdclk in case those * functions use cdclk. Not all platforms/ports do, @@ -1956,7 +2100,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, &dev_priv->gmbus_mutex); } - dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe); + intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe); for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1971,6 +2115,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_psr_resume(intel_dp); } + intel_audio_cdclk_change_post(dev_priv); + if (drm_WARN(&dev_priv->drm, intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { @@ -2140,6 +2286,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk); /* + * When only one VDSC engine is used, each VDSC operates + * with 1 ppc throughput, so the pixel clock cannot be + * higher than the VDSC clock (cdclk). + */ + if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split) + min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); + + /* + * HACK.
Currently for TGL platforms we calculate * min_cdclk initially based on pixel_rate divided * by 2, accounting for also plane requirements, @@ -2414,7 +2568,7 @@ static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) cdclk_state->logical.cdclk = cdclk; cdclk_state->logical.voltage_level = max_t(int, min_voltage_level, - dev_priv->display.calc_voltage_level(cdclk)); + intel_cdclk_calc_voltage_level(dev_priv, cdclk)); if (!cdclk_state->active_pipes) { cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); @@ -2423,7 +2577,7 @@ static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) cdclk_state->actual.vco = vco; cdclk_state->actual.cdclk = cdclk; cdclk_state->actual.voltage_level = - dev_priv->display.calc_voltage_level(cdclk); + intel_cdclk_calc_voltage_level(dev_priv, cdclk); } else { cdclk_state->actual = cdclk_state->logical; } @@ -2484,6 +2638,58 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state) return to_intel_cdclk_state(cdclk_state); } +int intel_cdclk_atomic_check(struct intel_atomic_state *state, + bool *need_cdclk_calc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_cdclk_state *old_cdclk_state; + const struct intel_cdclk_state *new_cdclk_state; + struct intel_plane_state *plane_state; + struct intel_bw_state *new_bw_state; + struct intel_plane *plane; + int min_cdclk = 0; + enum pipe pipe; + int ret; + int i; + + /* + * active_planes bitmask has been updated, and potentially affected + * planes are part of the state. We can now compute the minimum cdclk + * for each plane. + */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); + if (ret) + return ret; + } + + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + + if (new_cdclk_state && + old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) + *need_cdclk_calc = true; + + ret = intel_cdclk_bw_calc_min_cdclk(state); + if (ret) + return ret; + + new_bw_state = intel_atomic_get_new_bw_state(state); + + if (!new_cdclk_state || !new_bw_state) + return 0; + + for_each_pipe(i915, pipe) { + min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); + + /* Currently do this change only if we need to increase */ + if (new_bw_state->min_cdclk > min_cdclk) + *need_cdclk_calc = true; + } + + return 0; +} + int intel_cdclk_init(struct drm_i915_private *dev_priv) { struct intel_cdclk_state *cdclk_state; @@ -2515,7 +2721,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) new_cdclk_state->active_pipes = intel_calc_active_pipes(state, old_cdclk_state->active_pipes); - ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state); + ret = intel_cdclk_modeset_calc_cdclk(dev_priv, new_cdclk_state); if (ret) return ret; @@ -2547,7 +2753,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) struct intel_crtc_state *crtc_state; pipe = ilog2(new_cdclk_state->active_pipes); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) @@ -2557,9 +2763,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) pipe = INVALID_PIPE; } - if (intel_cdclk_can_crawl(dev_priv, - &old_cdclk_state->actual, - &new_cdclk_state->actual)) { + if (intel_cdclk_can_squash(dev_priv, + &old_cdclk_state->actual, + &new_cdclk_state->actual)) { + 
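	/*
	 * The branches here form an escalation ladder for applying a new
	 * CDCLK frequency, cheapest first: reprogram only the squash
	 * waveform (PLL and divider untouched), crawl the PLL to the new
	 * frequency without disabling it, update the cd2x divider
	 * synchronized to the one active pipe's vblank, and finally fall
	 * back to a full modeset on all pipes. A rough sketch of the
	 * decision, in the order the code checks it:
	 *
	 *	if (can_squash)		reprogram CDCLK_SQUASH_CTL
	 *	else if (can_crawl)	adjust the PLL frequency in place
	 *	else if (one pipe)	cd2x update against that pipe
	 *	else			full modeset
	 */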
drm_dbg_kms(&dev_priv->drm, + "Can change cdclk via squasher\n"); + } else if (intel_cdclk_can_crawl(dev_priv, + &old_cdclk_state->actual, + &new_cdclk_state->actual)) { drm_dbg_kms(&dev_priv->drm, "Can change cdclk via crawl\n"); } else if (pipe != INVALID_PIPE) { @@ -2695,7 +2906,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) */ void intel_update_cdclk(struct drm_i915_private *dev_priv) { - dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw); + intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw); /* * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): @@ -2845,6 +3056,157 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) return freq; } +static const struct intel_cdclk_funcs tgl_cdclk_funcs = { + .get_cdclk = bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = tgl_calc_voltage_level, +}; + +static const struct intel_cdclk_funcs ehl_cdclk_funcs = { + .get_cdclk = bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = ehl_calc_voltage_level, +}; + +static const struct intel_cdclk_funcs icl_cdclk_funcs = { + .get_cdclk = bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = icl_calc_voltage_level, +}; + +static const struct intel_cdclk_funcs bxt_cdclk_funcs = { + .get_cdclk = bxt_get_cdclk, + .set_cdclk = bxt_set_cdclk, + .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, + .modeset_calc_cdclk = bxt_modeset_calc_cdclk, + .calc_voltage_level = bxt_calc_voltage_level, +}; + +static const struct intel_cdclk_funcs skl_cdclk_funcs = { + .get_cdclk = skl_get_cdclk, + .set_cdclk = skl_set_cdclk, + .bw_calc_min_cdclk = skl_bw_calc_min_cdclk, + .modeset_calc_cdclk = skl_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs bdw_cdclk_funcs = { + .get_cdclk = bdw_get_cdclk, + .set_cdclk = bdw_set_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = bdw_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs chv_cdclk_funcs = { + .get_cdclk = vlv_get_cdclk, + .set_cdclk = chv_set_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = vlv_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs vlv_cdclk_funcs = { + .get_cdclk = vlv_get_cdclk, + .set_cdclk = vlv_set_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = vlv_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs hsw_cdclk_funcs = { + .get_cdclk = hsw_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +/* SNB, IVB, 965G, 945G */ +static const struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = { + .get_cdclk = fixed_400mhz_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs ilk_cdclk_funcs = { + .get_cdclk = fixed_450mhz_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs gm45_cdclk_funcs = { + .get_cdclk = gm45_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +/* G45 uses G33 */ + +static const struct intel_cdclk_funcs i965gm_cdclk_funcs = { + .get_cdclk = 
i965gm_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +/* i965G uses fixed 400 */ + +static const struct intel_cdclk_funcs pnv_cdclk_funcs = { + .get_cdclk = pnv_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs g33_cdclk_funcs = { + .get_cdclk = g33_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs i945gm_cdclk_funcs = { + .get_cdclk = i945gm_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +/* i945G uses fixed 400 */ + +static const struct intel_cdclk_funcs i915gm_cdclk_funcs = { + .get_cdclk = i915gm_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs i915g_cdclk_funcs = { + .get_cdclk = fixed_333mhz_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs i865g_cdclk_funcs = { + .get_cdclk = fixed_266mhz_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs i85x_cdclk_funcs = { + .get_cdclk = i85x_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs i845g_cdclk_funcs = { + .get_cdclk = fixed_200mhz_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + +static const struct intel_cdclk_funcs i830_cdclk_funcs = { + .get_cdclk = fixed_133mhz_get_cdclk, + .bw_calc_min_cdclk = intel_bw_calc_min_cdclk, + .modeset_calc_cdclk = fixed_modeset_calc_cdclk, +}; + /** * intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks * @dev_priv: i915 device @@ -2852,119 +3214,78 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv) void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (IS_DG2(dev_priv)) { - dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; - dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; + dev_priv->cdclk_funcs = &tgl_cdclk_funcs; dev_priv->cdclk.table = dg2_cdclk_table; } else if (IS_ALDERLAKE_P(dev_priv)) { - dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; - dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; + dev_priv->cdclk_funcs = &tgl_cdclk_funcs; /* Wa_22011320316:adl-p[a0] */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) dev_priv->cdclk.table = adlp_a_step_cdclk_table; else dev_priv->cdclk.table = adlp_cdclk_table; } else if (IS_ROCKETLAKE(dev_priv)) { - dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; - dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; + dev_priv->cdclk_funcs = &tgl_cdclk_funcs; dev_priv->cdclk.table = rkl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 12) { - dev_priv->display.set_cdclk = bxt_set_cdclk; - 
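/*
 * The pattern being introduced throughout this hunk: instead of
 * assigning three or four dev_priv->display.* function pointers per
 * platform branch, each branch now installs a single pointer to one of
 * the const intel_cdclk_funcs tables defined above. A minimal sketch of
 * the idiom (names hypothetical):
 *
 *	struct clk_ops { void (*set)(int khz); };
 *	static const struct clk_ops foo_ops = { .set = foo_set };
 *	...
 *	priv->ops = &foo_ops;	// one assignment instead of four
 *
 * The tables can then live in rodata, and a platform can no longer end
 * up with a partially wired set of hooks.
 */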
dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; - dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; + dev_priv->cdclk_funcs = &tgl_cdclk_funcs; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_JSL_EHL(dev_priv)) { - dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; - dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; + dev_priv->cdclk_funcs = &ehl_cdclk_funcs; dev_priv->cdclk.table = icl_cdclk_table; } else if (DISPLAY_VER(dev_priv) >= 11) { - dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; - dev_priv->display.calc_voltage_level = icl_calc_voltage_level; + dev_priv->cdclk_funcs = &icl_cdclk_funcs; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; - dev_priv->display.calc_voltage_level = bxt_calc_voltage_level; + dev_priv->cdclk_funcs = &bxt_cdclk_funcs; if (IS_GEMINILAKE(dev_priv)) dev_priv->cdclk.table = glk_cdclk_table; else dev_priv->cdclk.table = bxt_cdclk_table; } else if (DISPLAY_VER(dev_priv) == 9) { - dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk; - dev_priv->display.set_cdclk = skl_set_cdclk; - dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; + dev_priv->cdclk_funcs = &skl_cdclk_funcs; } else if (IS_BROADWELL(dev_priv)) { - dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; - dev_priv->display.set_cdclk = bdw_set_cdclk; - dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk; + dev_priv->cdclk_funcs = &bdw_cdclk_funcs; + } else if (IS_HASWELL(dev_priv)) { + dev_priv->cdclk_funcs = &hsw_cdclk_funcs; } else if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; - dev_priv->display.set_cdclk = chv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; + dev_priv->cdclk_funcs = &chv_cdclk_funcs; } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; - dev_priv->display.set_cdclk = vlv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; - } else { - dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk; - dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk; - } - - if (DISPLAY_VER(dev_priv) >= 10 || IS_BROXTON(dev_priv)) - dev_priv->display.get_cdclk = bxt_get_cdclk; - else if (DISPLAY_VER(dev_priv) == 9) - dev_priv->display.get_cdclk = skl_get_cdclk; - else if (IS_BROADWELL(dev_priv)) - dev_priv->display.get_cdclk = bdw_get_cdclk; - else if (IS_HASWELL(dev_priv)) - dev_priv->display.get_cdclk = hsw_get_cdclk; - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->display.get_cdclk = vlv_get_cdclk; - else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) - dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk; - else if (IS_IRONLAKE(dev_priv)) - dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk; - else if (IS_GM45(dev_priv)) - dev_priv->display.get_cdclk = gm45_get_cdclk; - else if (IS_G45(dev_priv)) - dev_priv->display.get_cdclk = g33_get_cdclk; - else if (IS_I965GM(dev_priv)) - 
dev_priv->display.get_cdclk = i965gm_get_cdclk; - else if (IS_I965G(dev_priv)) - dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk; - else if (IS_PINEVIEW(dev_priv)) - dev_priv->display.get_cdclk = pnv_get_cdclk; - else if (IS_G33(dev_priv)) - dev_priv->display.get_cdclk = g33_get_cdclk; - else if (IS_I945GM(dev_priv)) - dev_priv->display.get_cdclk = i945gm_get_cdclk; - else if (IS_I945G(dev_priv)) - dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk; - else if (IS_I915GM(dev_priv)) - dev_priv->display.get_cdclk = i915gm_get_cdclk; - else if (IS_I915G(dev_priv)) - dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk; - else if (IS_I865G(dev_priv)) - dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk; - else if (IS_I85X(dev_priv)) - dev_priv->display.get_cdclk = i85x_get_cdclk; - else if (IS_I845G(dev_priv)) - dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk; - else if (IS_I830(dev_priv)) - dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk; - - if (drm_WARN(&dev_priv->drm, !dev_priv->display.get_cdclk, - "Unknown platform. Assuming 133 MHz CDCLK\n")) - dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk; + dev_priv->cdclk_funcs = &vlv_cdclk_funcs; + } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { + dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + } else if (IS_IRONLAKE(dev_priv)) { + dev_priv->cdclk_funcs = &ilk_cdclk_funcs; + } else if (IS_GM45(dev_priv)) { + dev_priv->cdclk_funcs = &gm45_cdclk_funcs; + } else if (IS_G45(dev_priv)) { + dev_priv->cdclk_funcs = &g33_cdclk_funcs; + } else if (IS_I965GM(dev_priv)) { + dev_priv->cdclk_funcs = &i965gm_cdclk_funcs; + } else if (IS_I965G(dev_priv)) { + dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + } else if (IS_PINEVIEW(dev_priv)) { + dev_priv->cdclk_funcs = &pnv_cdclk_funcs; + } else if (IS_G33(dev_priv)) { + dev_priv->cdclk_funcs = &g33_cdclk_funcs; + } else if (IS_I945GM(dev_priv)) { + dev_priv->cdclk_funcs = &i945gm_cdclk_funcs; + } else if (IS_I945G(dev_priv)) { + dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs; + } else if (IS_I915GM(dev_priv)) { + dev_priv->cdclk_funcs = &i915gm_cdclk_funcs; + } else if (IS_I915G(dev_priv)) { + dev_priv->cdclk_funcs = &i915g_cdclk_funcs; + } else if (IS_I865G(dev_priv)) { + dev_priv->cdclk_funcs = &i865g_cdclk_funcs; + } else if (IS_I85X(dev_priv)) { + dev_priv->cdclk_funcs = &i85x_cdclk_funcs; + } else if (IS_I845G(dev_priv)) { + dev_priv->cdclk_funcs = &i845g_cdclk_funcs; + } else if (IS_I830(dev_priv)) { + dev_priv->cdclk_funcs = &i830_cdclk_funcs; + } + + if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs, + "Unknown platform. 
Assuming i830\n")) + dev_priv->cdclk_funcs = &i830_cdclk_funcs; } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index b34eb00fb327..fc638522e445 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -16,13 +16,6 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; -struct intel_cdclk_vals { - u32 cdclk; - u16 refclk; - u8 divider; /* CD2X divider * 2 */ - u8 ratio; -}; - struct intel_cdclk_state { struct intel_global_state base; @@ -68,7 +61,10 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, const char *context); int intel_modeset_calc_cdclk(struct intel_atomic_state *state); - +void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, + struct intel_cdclk_config *cdclk_config); +int intel_cdclk_atomic_check(struct intel_atomic_state *state, + bool *need_cdclk_calc); struct intel_cdclk_state * intel_atomic_get_cdclk_state(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index afcb4bf3826c..de3ded1e327a 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -25,6 +25,8 @@ #include "intel_color.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dpll.h" +#include "vlv_dsi_pll.h" #define CTM_COEFF_SIGN (1ULL << 63) @@ -550,8 +552,8 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write(dev_priv, PALETTE(pipe, i), - i9xx_lut_8(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); } static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) @@ -574,15 +576,15 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0), - i965_lut_10p6_ldw(&lut[i])); - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1), - i965_lut_10p6_udw(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), + i965_lut_10p6_ldw(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), + i965_lut_10p6_udw(&lut[i])); } - intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); - intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); - intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); } static void i965_load_luts(const struct intel_crtc_state *crtc_state) @@ -616,8 +618,8 @@ static void ilk_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write(dev_priv, LGC_PALETTE(pipe, i), - i9xx_lut_8(&lut[i])); + intel_de_write_fw(dev_priv, LGC_PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); } static void ilk_load_lut_10(struct intel_crtc *crtc, @@ -629,8 +631,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) - intel_de_write(dev_priv, PREC_PALETTE(pipe, i), - ilk_lut_10(&lut[i])); + intel_de_write_fw(dev_priv, PREC_PALETTE(pipe, i), + ilk_lut_10(&lut[i])); } static void ilk_load_luts(const struct intel_crtc_state *crtc_state) @@ -679,16 +681,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc, 
const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), - ilk_lut_10(entry)); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. */ - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); } /* On BDW+ the index auto increment mode actually works */ @@ -702,23 +704,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc, int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), - prec_index | PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < hw_lut_size; i++) { /* We discard half the user entries in split gamma mode */ const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), - ilk_lut_10(entry)); + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. */ - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); } static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state) @@ -806,6 +808,14 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) } } +static int glk_degamma_lut_size(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 13) + return 131; + else + return 35; +} + static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -819,14 +829,14 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * ignore the index bits, so we need to reset it to index 0 * separately. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), - PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { /* - * First 33 entries represent range from 0 to 1.0 - * 34th and 35th entry will represent extended range + * First lut_size entries represent range from 0 to 1.0 + * 3 additional lut entries will represent extended range * inputs 3.0 and 7.0 respectively, currently clamped * at 1.0. Since the precision is 16bit, the user * value can be directly filled to register. @@ -837,15 +847,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * ToDo: Extend to max 7.0. Enable 32 bit input value * as compared to just 16 to achieve this. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), - lut[i].green); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), + lut[i].green); } /* Clamp values > 1.0. 
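(A quick numeric note, grounded in the glk_degamma_lut_size() helper added above: 1 << 16 is 65536, i.e. 1.0 in the 16-bit-fraction register format mentioned earlier, and the pad loop below now runs out to 131 entries on DISPLAY_VER >= 13 instead of the old hardcoded 35.)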
*/ - while (i++ < 35) - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + while (i++ < glk_degamma_lut_size(dev_priv)) + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) @@ -860,21 +870,21 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat * ignore the index bits, so we need to reset it to index 0 * separately. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), - PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { u32 v = (i << 16) / (lut_size - 1); - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); } /* Clamp values > 1.0. */ while (i++ < 35) - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_luts(const struct intel_crtc_state *crtc_state) @@ -1069,10 +1079,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), - chv_cgm_degamma_ldw(&lut[i])); - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), - chv_cgm_degamma_udw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), + chv_cgm_degamma_ldw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), + chv_cgm_degamma_udw(&lut[i])); } } @@ -1103,10 +1113,10 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), - chv_cgm_gamma_ldw(&lut[i])); - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), - chv_cgm_gamma_udw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), + chv_cgm_gamma_ldw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), + chv_cgm_gamma_udw(&lut[i])); } } @@ -1129,22 +1139,22 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state) else i965_load_luts(crtc_state); - intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe), - crtc_state->cgm_mode); + intel_de_write_fw(dev_priv, CGM_PIPE_MODE(crtc->pipe), + crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - dev_priv->display.load_luts(crtc_state); + dev_priv->color_funcs->load_luts(crtc_state); } void intel_color_commit(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - dev_priv->display.color_commit(crtc_state); + dev_priv->color_funcs->color_commit(crtc_state); } static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state) @@ -1200,15 +1210,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - return dev_priv->display.color_check(crtc_state); + return dev_priv->color_funcs->color_check(crtc_state); } void 
intel_color_get_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (dev_priv->display.read_luts) - dev_priv->display.read_luts(crtc_state); + if (dev_priv->color_funcs->read_luts) + dev_priv->color_funcs->read_luts(crtc_state); } static bool need_plane_update(struct intel_plane *plane, @@ -1572,6 +1582,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state) static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 gamma_mode = 0; if (crtc_state->hw.degamma_lut) @@ -1584,6 +1596,13 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state) if (!crtc_state->hw.gamma_lut || crtc_state_is_legacy_gamma(crtc_state)) gamma_mode |= GAMMA_MODE_MODE_8BIT; + /* + * Enable 10bit gamma for D13 + * ToDo: Extend to Logarithmic Gamma once the new UAPI + * is accepted and implemented by a userspace consumer + */ + else if (DISPLAY_VER(i915) >= 13) + gamma_mode |= GAMMA_MODE_MODE_10BIT; else gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED; @@ -1806,7 +1825,7 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read(dev_priv, PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -1841,15 +1860,15 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0)); - u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1)); + u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); + u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); i965_lut_10p6_pack(&lut[i], ldw, udw); } - lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0))); - lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1))); - lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2))); + lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0))); + lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1))); + lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2))); return blob; } @@ -1884,8 +1903,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); - u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); + u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); + u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); chv_cgm_gamma_pack(&lut[i], ldw, udw); } @@ -1920,7 +1939,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, LGC_PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -1945,7 +1964,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, PREC_PALETTE(pipe, i)); ilk_lut_10_pack(&lut[i], val); } @@ -1997,16
+2016,16 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc, lut = blob->data; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), - prec_index | PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); + u32 val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); } - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); return blob; } @@ -2048,17 +2067,17 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc) lut = blob->data; - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), - PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), + PAL_PREC_AUTO_INCREMENT); for (i = 0; i < 9; i++) { - u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); - u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 ldw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 udw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); icl_lut_multi_seg_pack(&lut[i], ldw, udw); } - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); /* * FIXME readouts from PAL_PREC_DATA register aren't giving @@ -2092,6 +2111,76 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state) } } +static const struct intel_color_funcs chv_color_funcs = { + .color_check = chv_color_check, + .color_commit = i9xx_color_commit, + .load_luts = chv_load_luts, + .read_luts = chv_read_luts, +}; + +static const struct intel_color_funcs i965_color_funcs = { + .color_check = i9xx_color_check, + .color_commit = i9xx_color_commit, + .load_luts = i965_load_luts, + .read_luts = i965_read_luts, +}; + +static const struct intel_color_funcs i9xx_color_funcs = { + .color_check = i9xx_color_check, + .color_commit = i9xx_color_commit, + .load_luts = i9xx_load_luts, + .read_luts = i9xx_read_luts, +}; + +static const struct intel_color_funcs icl_color_funcs = { + .color_check = icl_color_check, + .color_commit = skl_color_commit, + .load_luts = icl_load_luts, + .read_luts = icl_read_luts, +}; + +static const struct intel_color_funcs glk_color_funcs = { + .color_check = glk_color_check, + .color_commit = skl_color_commit, + .load_luts = glk_load_luts, + .read_luts = glk_read_luts, +}; + +static const struct intel_color_funcs skl_color_funcs = { + .color_check = ivb_color_check, + .color_commit = skl_color_commit, + .load_luts = bdw_load_luts, + .read_luts = NULL, +}; + +static const struct intel_color_funcs bdw_color_funcs = { + .color_check = ivb_color_check, + .color_commit = hsw_color_commit, + .load_luts = bdw_load_luts, + .read_luts = NULL, +}; + +static const struct intel_color_funcs hsw_color_funcs = { + .color_check = ivb_color_check, + .color_commit = hsw_color_commit, + .load_luts = ivb_load_luts, + .read_luts = NULL, +}; + +static const struct intel_color_funcs ivb_color_funcs = { + .color_check = ivb_color_check, + .color_commit = ilk_color_commit, + .load_luts = ivb_load_luts, + .read_luts = NULL, +}; + +static const struct intel_color_funcs ilk_color_funcs = { + .color_check = ilk_color_check, + .color_commit = ilk_color_commit, + .load_luts = ilk_load_luts, + .read_luts = ilk_read_luts, +}; + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -2101,52 +2190,28 
@@ void intel_color_init(struct intel_crtc *crtc) if (HAS_GMCH(dev_priv)) { if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.color_check = chv_color_check; - dev_priv->display.color_commit = i9xx_color_commit; - dev_priv->display.load_luts = chv_load_luts; - dev_priv->display.read_luts = chv_read_luts; + dev_priv->color_funcs = &chv_color_funcs; } else if (DISPLAY_VER(dev_priv) >= 4) { - dev_priv->display.color_check = i9xx_color_check; - dev_priv->display.color_commit = i9xx_color_commit; - dev_priv->display.load_luts = i965_load_luts; - dev_priv->display.read_luts = i965_read_luts; + dev_priv->color_funcs = &i965_color_funcs; } else { - dev_priv->display.color_check = i9xx_color_check; - dev_priv->display.color_commit = i9xx_color_commit; - dev_priv->display.load_luts = i9xx_load_luts; - dev_priv->display.read_luts = i9xx_read_luts; + dev_priv->color_funcs = &i9xx_color_funcs; } } else { if (DISPLAY_VER(dev_priv) >= 11) - dev_priv->display.color_check = icl_color_check; - else if (DISPLAY_VER(dev_priv) >= 10) - dev_priv->display.color_check = glk_color_check; - else if (DISPLAY_VER(dev_priv) >= 7) - dev_priv->display.color_check = ivb_color_check; - else - dev_priv->display.color_check = ilk_color_check; - - if (DISPLAY_VER(dev_priv) >= 9) - dev_priv->display.color_commit = skl_color_commit; - else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - dev_priv->display.color_commit = hsw_color_commit; - else - dev_priv->display.color_commit = ilk_color_commit; - - if (DISPLAY_VER(dev_priv) >= 11) { - dev_priv->display.load_luts = icl_load_luts; - dev_priv->display.read_luts = icl_read_luts; - } else if (DISPLAY_VER(dev_priv) == 10) { - dev_priv->display.load_luts = glk_load_luts; - dev_priv->display.read_luts = glk_read_luts; - } else if (DISPLAY_VER(dev_priv) >= 8) { - dev_priv->display.load_luts = bdw_load_luts; - } else if (DISPLAY_VER(dev_priv) >= 7) { - dev_priv->display.load_luts = ivb_load_luts; - } else { - dev_priv->display.load_luts = ilk_load_luts; - dev_priv->display.read_luts = ilk_read_luts; - } + dev_priv->color_funcs = &icl_color_funcs; + else if (DISPLAY_VER(dev_priv) == 10) + dev_priv->color_funcs = &glk_color_funcs; + else if (DISPLAY_VER(dev_priv) == 9) + dev_priv->color_funcs = &skl_color_funcs; + else if (DISPLAY_VER(dev_priv) == 8) + dev_priv->color_funcs = &bdw_color_funcs; + else if (DISPLAY_VER(dev_priv) == 7) { + if (IS_HASWELL(dev_priv)) + dev_priv->color_funcs = &hsw_color_funcs; + else + dev_priv->color_funcs = &ivb_color_funcs; + } else + dev_priv->color_funcs = &ilk_color_funcs; } drm_crtc_enable_color_mgmt(&crtc->base, diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index bacdf8a16bcb..f628e0542933 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -220,13 +220,13 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, return false; if (DISPLAY_VER(dev_priv) >= 12) { - ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy), ICL_PORT_TX_DW8_ODCC_CLK_SEL | ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, ICL_PORT_TX_DW8_ODCC_CLK_SEL | ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2); - ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy), DCC_MODE_SELECT_MASK, DCC_MODE_SELECT_CONTINUOSLY); } @@ -301,7 +301,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, val 
= intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); val &= ~PWR_DOWN_LN_MASK; - val |= lane_mask << PWR_DOWN_LN_SHIFT; + val |= lane_mask; intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); } @@ -343,13 +343,13 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) skip_phy_misc: if (DISPLAY_VER(dev_priv) >= 12) { - val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy)); val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK; val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL; val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2; intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val); - val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); val &= ~DCC_MODE_SELECT_MASK; val |= DCC_MODE_SELECT_CONTINUOSLY; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 9bed1ccecea0..c65f95a9a1ec 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -29,13 +29,13 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> -#include "display/intel_panel.h" - #include "i915_drv.h" +#include "intel_backlight.h" #include "intel_connector.h" #include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_hdcp.h" +#include "intel_panel.h" int intel_connector_init(struct intel_connector *connector) { @@ -124,7 +124,7 @@ int intel_connector_register(struct drm_connector *connector) goto err_backlight; } - intel_connector_debugfs_add(connector); + intel_connector_debugfs_add(intel_connector); return 0; diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 408f82b0dc7d..6a3893c8ff22 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -45,6 +45,7 @@ #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug.h" +#include "intel_pch_display.h" /* Here's the desired hotplug mode */ #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ @@ -143,7 +144,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder, static void hsw_crt_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + lpt_pch_get_config(pipe_config); hsw_ddi_get_config(encoder, pipe_config); @@ -152,8 +153,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC); pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); - - pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } /* Note: The caller is required to filter out dpms modes not supported by the @@ -247,11 +246,12 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_crtc_vblank_off(old_crtc_state); - intel_disable_pipe(old_crtc_state); + intel_disable_transcoder(old_crtc_state); intel_ddi_disable_transcoder_func(old_crtc_state); @@ -261,10 +261,9 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); - 
lpt_disable_pch_transcoder(dev_priv); - lpt_disable_iclkip(dev_priv); + lpt_pch_disable(state, crtc); - intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state); + hsw_fdi_disable(encoder); drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); @@ -314,16 +313,16 @@ static void hsw_enable_crt(struct intel_atomic_state *state, intel_ddi_enable_transcoder_func(encoder, crtc_state); - intel_enable_pipe(crtc_state); + intel_enable_transcoder(crtc_state); - lpt_pch_enable(crtc_state); + lpt_pch_enable(state, crtc); intel_crtc_vblank_on(crtc_state); intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); - intel_wait_for_vblank(dev_priv, pipe); - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); + intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -722,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) intel_uncore_posting_read(uncore, pipeconf_reg); /* Wait for next Vblank to substitute * border color for Color info */ - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ? connector_status_connected : diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 254e67141a77..16c3ca66d9f0 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -3,29 +3,31 @@ * Copyright © 2020 Intel Corporation */ #include <linux/kernel.h> +#include <linux/pm_qos.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_vblank_work.h> -#include "i915_trace.h" #include "i915_vgpu.h" - +#include "i9xx_plane.h" +#include "icl_dsi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_color.h" #include "intel_crtc.h" #include "intel_cursor.h" #include "intel_display_debugfs.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dsi.h" #include "intel_pipe_crc.h" #include "intel_psr.h" #include "intel_sprite.h" #include "intel_vrr.h" -#include "i9xx_plane.h" #include "skl_universal_plane.h" static void assert_vblank_disabled(struct drm_crtc *crtc) @@ -34,6 +36,38 @@ static void assert_vblank_disabled(struct drm_crtc *crtc) drm_crtc_vblank_put(crtc); } +struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915) +{ + return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0)); +} + +struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, + enum pipe pipe) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + if (crtc->pipe == pipe) + return crtc; + } + + return NULL; +} + +void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc) +{ + drm_crtc_wait_one_vblank(&crtc->base); +} + +void intel_wait_for_vblank_if_active(struct drm_i915_private *i915, + enum pipe pipe) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); + + if (crtc->active) + intel_crtc_wait_for_next_vblank(crtc); +} + u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -167,6 +201,8 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); + cpu_latency_qos_remove_request(&crtc->vblank_pm_qos); +
drm_crtc_cleanup(&crtc->base); kfree(crtc); } @@ -323,18 +359,6 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) if (ret) goto fail; - BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) || - dev_priv->pipe_to_crtc_mapping[pipe] != NULL); - dev_priv->pipe_to_crtc_mapping[pipe] = crtc; - - if (DISPLAY_VER(dev_priv) < 9) { - enum i9xx_plane_id i9xx_plane = primary->i9xx_plane; - - BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || - dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL); - dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc; - } - if (DISPLAY_VER(dev_priv) >= 11) drm_crtc_create_scaling_filter_property(&crtc->base, BIT(DRM_SCALING_FILTER_DEFAULT) | @@ -344,6 +368,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) intel_crtc_crc_init(crtc); + cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE); + drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); return 0; @@ -354,6 +380,65 @@ fail: return ret; } +static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->hw.active && + !intel_crtc_needs_modeset(crtc_state) && + !crtc_state->preload_luts && + (crtc_state->uapi.color_mgmt_changed || + crtc_state->update_pipe); +} + +static void intel_crtc_vblank_work(struct kthread_work *base) +{ + struct drm_vblank_work *work = to_drm_vblank_work(base); + struct intel_crtc_state *crtc_state = + container_of(work, typeof(*crtc_state), vblank_work); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + trace_intel_crtc_vblank_work_start(crtc); + + intel_color_load_luts(crtc_state); + + if (crtc_state->uapi.event) { + spin_lock_irq(&crtc->base.dev->event_lock); + drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event); + crtc_state->uapi.event = NULL; + spin_unlock_irq(&crtc->base.dev->event_lock); + } + + trace_intel_crtc_vblank_work_end(crtc); +} + +static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base, + intel_crtc_vblank_work); + /* + * Interrupt latency is critical for getting the vblank + * work executed as early as possible during the vblank. + */ + cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0); +} + +void intel_wait_for_vblank_workers(struct intel_atomic_state *state) +{ + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + if (!intel_crtc_needs_vblank_work(crtc_state)) + continue; + + drm_vblank_work_flush(&crtc_state->vblank_work); + cpu_latency_qos_update_request(&crtc->vblank_pm_qos, + PM_QOS_DEFAULT_VALUE); + } +} + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -387,7 +472,7 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode) * until a subsequent call to intel_pipe_update_end(). That is done to * avoid random delays. 
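* * As a rough sketch of the evasion window computed below (assuming the * usual VBLANK_EVASION_TIME_US budget that the FIXME further down refers to): * * min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, budget); * max = vblank_start - 1; * * i.e. the critical section has to start on a scanline in [min, max], * otherwise the update waits for the next frame.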
*/ -void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) +void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -402,10 +487,17 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) if (new_crtc_state->uapi.async_flip) return; - if (new_crtc_state->vrr.enable) - vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); - else + if (intel_crtc_needs_vblank_work(new_crtc_state)) + intel_crtc_vblank_work_init(new_crtc_state); + + if (new_crtc_state->vrr.enable) { + if (intel_vrr_is_push_sent(new_crtc_state)) + vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state); + else + vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); + } else { vblank_start = intel_mode_vblank_start(adjusted_mode); + } /* FIXME needs to be calibrated sensibly */ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, @@ -554,7 +646,11 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) * Would be slightly nice to just grab the vblank count and arm the * event outside of the critical section - the spinlock might spin for a * while ... */ - if (new_crtc_state->uapi.event) { + if (intel_crtc_needs_vblank_work(new_crtc_state)) { + drm_vblank_work_schedule(&new_crtc_state->vblank_work, + drm_crtc_accurate_vblank_count(&crtc->base) + 1, + false); + } else if (new_crtc_state->uapi.event) { drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base) != 0); @@ -566,11 +662,24 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) new_crtc_state->uapi.event = NULL; } - local_irq_enable(); - - /* Send VRR Push to terminate Vblank */ + /* + * Send VRR Push to terminate Vblank. If we are already in vblank + * this has to be done _after_ sampling the frame counter, as + * otherwise the push would immediately terminate the vblank and + * the sampled frame counter would correspond to the next frame + * instead of the current frame. + * + * There is a tiny race here (iff vblank evasion failed us) where + * we might sample the frame counter just before vmax vblank start + * but the push would be sent just after it. That would cause the + * push to affect the next frame instead of the current frame, + * which would cause the next frame to terminate already at vmin + * vblank start instead of vmax vblank start. 
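+ * + * Condensed, the ordering the code below implements is simply: + * (1) sample the frame counter or arm the vblank event, + * (2) intel_vrr_send_push(new_crtc_state), + * (3) local_irq_enable().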
+ */ intel_vrr_send_push(new_crtc_state); + local_irq_enable(); + if (intel_vgpu_active(dev_priv)) return; diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h index a5ae997581aa..73077137fb99 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.h +++ b/drivers/gpu/drm/i915/display/intel_crtc.h @@ -8,11 +8,16 @@ #include <linux/types.h> +enum i9xx_plane_id; enum pipe; +struct drm_display_mode; struct drm_i915_private; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + int usecs); u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state); int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe); struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc); @@ -21,5 +26,14 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state); void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); +void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state); +void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); +void intel_wait_for_vblank_workers(struct intel_atomic_state *state); +struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915); +struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, + enum pipe pipe); +void intel_wait_for_vblank_if_active(struct drm_i915_private *i915, + enum pipe pipe); +void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc); #endif diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index c7618fef0143..16d34685d83f 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -17,7 +17,7 @@ #include "intel_display_types.h" #include "intel_display.h" #include "intel_fb.h" - +#include "intel_fb_pin.h" #include "intel_frontbuffer.h" #include "intel_pm.h" #include "intel_psr.h" @@ -28,11 +28,6 @@ static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static const u64 cursor_format_modifiers[] = { - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - static u32 intel_cursor_base(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = @@ -195,7 +190,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, { return CURSOR_ENABLE | CURSOR_FORMAT_ARGB | - CURSOR_STRIDE(plane_state->view.color_plane[0].stride); + CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride); } static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) @@ -234,7 +229,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, } drm_WARN_ON(&i915->drm, plane_state->uapi.visible && - plane_state->view.color_plane[0].stride != fb->pitches[0]); + plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); switch (fb->pitches[0]) { case 256: @@ -253,9 +248,10 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, return 0; } -static void i845_update_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +/* TODO: split into noarm+arm pair */ +static void i845_cursor_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = 
to_i915(plane->base.dev); u32 cntl = 0, base = 0, pos = 0, size = 0; @@ -298,10 +294,10 @@ static void i845_update_cursor(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void i845_disable_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +static void i845_cursor_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - i845_update_cursor(plane, crtc_state, NULL); + i845_cursor_update_arm(plane, crtc_state, NULL); } static bool i845_cursor_get_hw_state(struct intel_plane *plane, @@ -455,7 +451,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, } drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && - plane_state->view.color_plane[0].stride != fb->pitches[0]); + plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); if (fb->pitches[0] != drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { @@ -488,9 +484,10 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, return 0; } -static void i9xx_update_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +/* TODO: split into noarm+arm pair */ +static void i9xx_cursor_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -536,8 +533,10 @@ static void i9xx_update_cursor(struct intel_plane *plane, if (DISPLAY_VER(dev_priv) >= 9) skl_write_cursor_wm(plane, crtc_state); - if (!intel_crtc_needs_modeset(crtc_state)) + if (plane_state) intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); + else + intel_psr2_disable_plane_sel_fetch(plane, crtc_state); if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || @@ -560,10 +559,10 @@ static void i9xx_update_cursor(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void i9xx_disable_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +static void i9xx_cursor_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - i9xx_update_cursor(plane, crtc_state, NULL); + i9xx_cursor_update_arm(plane, crtc_state, NULL); } static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, @@ -603,8 +602,10 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - return modifier == DRM_FORMAT_MOD_LINEAR && - format == DRM_FORMAT_ARGB8888; + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) + return false; + + return format == DRM_FORMAT_ARGB8888; } static int @@ -637,8 +638,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane, * FIXME bigjoiner fastpath would be good */ if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) || - crtc_state->update_pipe || crtc_state->bigjoiner || - crtc_state->enable_psr2_sel_fetch) + crtc_state->update_pipe || crtc_state->bigjoiner) goto slow; /* @@ -696,7 +696,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane, goto out_free; intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), - ORIGIN_FLIP); + ORIGIN_CURSOR_UPDATE); intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), to_intel_frontbuffer(new_plane_state->hw.fb), plane->frontbuffer_bit); @@ -716,10 +716,12 @@ 
intel_legacy_cursor_update(struct drm_plane *_plane, */ crtc_state->active_planes = new_crtc_state->active_planes; - if (new_plane_state->uapi.visible) - intel_update_plane(plane, crtc_state, new_plane_state); - else - intel_disable_plane(plane, crtc_state); + if (new_plane_state->uapi.visible) { + intel_plane_update_noarm(plane, crtc_state, new_plane_state); + intel_plane_update_arm(plane, crtc_state, new_plane_state); + } else { + intel_plane_disable_arm(plane, crtc_state); + } intel_plane_unpin_fb(old_plane_state); @@ -753,6 +755,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, { struct intel_plane *cursor; int ret, zpos; + u64 *modifiers; cursor = intel_plane_alloc(); if (IS_ERR(cursor)) @@ -765,14 +768,14 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { cursor->max_stride = i845_cursor_max_stride; - cursor->update_plane = i845_update_cursor; - cursor->disable_plane = i845_disable_cursor; + cursor->update_arm = i845_cursor_update_arm; + cursor->disable_arm = i845_cursor_disable_arm; cursor->get_hw_state = i845_cursor_get_hw_state; cursor->check_plane = i845_check_cursor; } else { cursor->max_stride = i9xx_cursor_max_stride; - cursor->update_plane = i9xx_update_cursor; - cursor->disable_plane = i9xx_disable_cursor; + cursor->update_arm = i9xx_cursor_update_arm; + cursor->disable_arm = i9xx_cursor_disable_arm; cursor->get_hw_state = i9xx_cursor_get_hw_state; cursor->check_plane = i9xx_check_cursor; } @@ -783,13 +786,18 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) cursor->cursor.size = ~0; + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE); + ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 0, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - cursor_format_modifiers, + modifiers, DRM_PLANE_TYPE_CURSOR, "cursor %c", pipe_name(pipe)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 9903a78df896..9c9d574f0b8c 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -25,10 +25,12 @@ * */ +#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_scdc_helper.h> #include "i915_drv.h" #include "intel_audio.h" +#include "intel_backlight.h" #include "intel_combo_phy.h" #include "intel_connector.h" #include "intel_crtc.h" @@ -40,6 +42,7 @@ #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" +#include "intel_drrs.h" #include "intel_dsi.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" @@ -48,7 +51,6 @@ #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_lspcon.h" -#include "intel_panel.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_snps_phy.h" @@ -73,24 +75,27 @@ static const u8 index_to_dp_signal_levels[] = { }; static int intel_ddi_hdmi_level(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) + const struct intel_ddi_buf_trans *trans) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int n_entries, level, default_entry; + int level; - n_entries = intel_ddi_hdmi_num_entries(encoder, crtc_state, &default_entry); - if (n_entries == 0) - return 0; level = intel_bios_hdmi_level_shift(encoder); if (level < 0) - level = default_entry; - - if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) 
- level = n_entries - 1; + level = trans->hdmi_default_entry; return level; } +static bool has_buf_trans_select(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915); +} + +static bool has_iboost(struct drm_i915_private *i915) +{ + return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915); +} + /* * Starting with Haswell, DDI port buffers must be programmed with correct * values in advance. This function programs the correct values for @@ -103,22 +108,22 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, u32 iboost_bit = 0; int i, n_entries; enum port port = encoder->port; - const struct intel_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *trans; - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; /* If we're boosting the current, set bit 31 of trans1 */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && + if (has_iboost(dev_priv) && intel_bios_encoder_dp_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; for (i = 0; i < n_entries; i++) { intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i), - ddi_translations->entries[i].hsw.trans1 | iboost_bit); + trans->entries[i].hsw.trans1 | iboost_bit); intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i), - ddi_translations->entries[i].hsw.trans2); + trans->entries[i].hsw.trans2); } } @@ -128,31 +133,29 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, * HDMI/DVI use cases. */ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + int level = intel_ddi_level(encoder, crtc_state, 0); u32 iboost_bit = 0; int n_entries; enum port port = encoder->port; - const struct intel_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *trans; - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) - level = n_entries - 1; /* If we're boosting the current, set bit 31 of trans1 */ - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) && + if (has_iboost(dev_priv) && intel_bios_encoder_hdmi_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; /* Entry 9 is for HDMI: */ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9), - ddi_translations->entries[level].hsw.trans1 | iboost_bit); + trans->entries[level].hsw.trans1 | iboost_bit); intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9), - ddi_translations->entries[level].hsw.trans2); + trans->entries[level].hsw.trans2); } void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, @@ -281,13 +284,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum phy phy = intel_port_to_phy(i915, encoder->port); + /* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */ intel_dp->DP = dig_port->saved_port_bits | - DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); - intel_dp->DP |= DDI_PORT_WIDTH(crtc_state->lane_count); + 
DDI_PORT_WIDTH(crtc_state->lane_count) | + DDI_BUF_TRANS_SELECT(0); if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) { intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock); - if (dig_port->tc_mode != TC_PORT_TBT_ALT) + if (!intel_tc_port_in_tbt_alt_mode(dig_port)) intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP; } } @@ -318,10 +322,11 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { int dotclock; + /* CRT dotclock is determined via other means */ if (pipe_config->has_pch_encoder) - dotclock = intel_dotclock_calculate(pipe_config->port_clock, - &pipe_config->fdi_m_n); - else if (intel_crtc_has_dp_encoder(pipe_config)) + return; + + if (intel_crtc_has_dp_encoder(pipe_config)) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) @@ -407,6 +412,20 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder) return master_transcoder + 1; } +static void +intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val = 0; + + if (intel_dp_is_uhbr(crtc_state)) + val = TRANS_DP2_128B132B_CHANNEL_CODING; + + intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val); +} + /* * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state. * @@ -488,10 +507,13 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder, if (crtc_state->hdmi_high_tmds_clock_ratio) temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { - temp |= TRANS_DDI_MODE_SELECT_FDI; + temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B; temp |= (crtc_state->fdi_lanes - 1) << 1; } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { - temp |= TRANS_DDI_MODE_SELECT_DP_MST; + if (intel_dp_is_uhbr(crtc_state)) + temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B; + else + temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); if (DISPLAY_VER(dev_priv) >= 12) { @@ -678,8 +700,13 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) ret = false; break; - case TRANS_DDI_MODE_SELECT_FDI: - ret = type == DRM_MODE_CONNECTOR_VGA; + case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B: + if (HAS_DP20(dev_priv)) + /* 128b/132b */ + ret = false; + else + /* FDI */ + ret = type == DRM_MODE_CONNECTOR_VGA; break; default: @@ -766,8 +793,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, if ((tmp & port_mask) != ddi_select) continue; - if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == - TRANS_DDI_MODE_SELECT_DP_MST) + if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST || + (HAS_DP20(dev_priv) && + (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B)) mst_pipe_mask |= BIT(p); *pipe_mask |= BIT(p); @@ -861,8 +889,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, dig_port = enc_to_dig_port(encoder); - if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) { + if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); @@ -947,16 +974,14 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, iboost = intel_bios_encoder_dp_boost_level(encoder->devdata); if (iboost 
== 0) { - const struct intel_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *trans; int n_entries; - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) - level = n_entries - 1; - iboost = ddi_translations->entries[level].hsw.i_boost; + iboost = trans->entries[level].hsw.i_boost; } /* Make sure that the requested I_boost is valid */ @@ -971,28 +996,6 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); } -static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_ddi_buf_trans *ddi_translations; - enum port port = encoder->port; - int n_entries; - - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) - return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) - level = n_entries - 1; - - bxt_ddi_phy_set_signal_level(dev_priv, port, - ddi_translations->entries[level].bxt.margin, - ddi_translations->entries[level].bxt.scale, - ddi_translations->entries[level].bxt.enable, - ddi_translations->entries[level].bxt.deemphasis); -} - static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -1022,33 +1025,42 @@ static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp) return DP_TRAIN_PRE_EMPH_LEVEL_3; } +static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_state, + int lane) +{ + if (crtc_state->port_clock > 600000) + return 0; + + if (crtc_state->lane_count == 4) + return lane >= 1 ? LOADGEN_SELECT : 0; + else + return lane == 1 || lane == 2 ? LOADGEN_SELECT : 0; +} + static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int n_entries, ln; u32 val; - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) - level = n_entries - 1; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED; - intel_dp->hobl_active = is_hobl_buf_trans(ddi_translations); + intel_dp->hobl_active = is_hobl_buf_trans(trans); intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val, intel_dp->hobl_active ? 
val : 0); } /* Set PORT_TX_DW5 */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | TAP2_DISABLE | TAP3_DISABLE); val |= SCALING_MODE_SEL(0x2); @@ -1057,52 +1069,52 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* Program PORT_TX_DW2 */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); - val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | - RCOMP_SCALAR_MASK); - val |= SWING_SEL_UPPER(ddi_translations->entries[level].icl.dw2_swing_sel); - val |= SWING_SEL_LOWER(ddi_translations->entries[level].icl.dw2_swing_sel); - /* Program Rcomp scalar for every table entry */ - val |= RCOMP_SCALAR(0x98); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val); + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy), + SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK, + SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) | + SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) | + RCOMP_SCALAR(0x98)); + } /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. */ - for (ln = 0; ln <= 3; ln++) { - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); - val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | - CURSOR_COEFF_MASK); - val |= POST_CURSOR_1(ddi_translations->entries[level].icl.dw4_post_cursor_1); - val |= POST_CURSOR_2(ddi_translations->entries[level].icl.dw4_post_cursor_2); - val |= CURSOR_COEFF(ddi_translations->entries[level].icl.dw4_cursor_coeff); - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK, + POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) | + POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) | + CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff)); } /* Program PORT_TX_DW7 */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy)); - val &= ~N_SCALAR_MASK; - val |= N_SCALAR(ddi_translations->entries[level].icl.dw7_n_scalar); - intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val); + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy), + N_SCALAR_MASK, + N_SCALAR(trans->entries[level].icl.dw7_n_scalar)); + } } -static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) +static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - int width, rate, ln; u32 val; - - width = crtc_state->lane_count; - rate = crtc_state->port_clock; + int ln; /* * 1. If port type is eDP or DP, * set PORT_PCS_DW1 cmnkeeper_enable to 1b, * else clear to 0b. 
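	 *
	 * (Illustrative aside, not part of the patch: the open-coded
	 * read/modify/write sequences in this function are converted to
	 * intel_de_rmw(), which is assumed to behave roughly like
	 *
	 *	u32 intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
	 *			 u32 clear, u32 set)
	 *	{
	 *		u32 old = intel_de_read(i915, reg);
	 *
	 *		intel_de_write(i915, reg, (old & ~clear) | set);
	 *		return old;
	 *	}
	 *
	 * so passing set == 0 clears bits and clear == 0 sets them, as in
	 * the loadgen-select and SUS-clock hunks below.)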
*/ - val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) val &= ~COMMON_KEEPER_EN; else @@ -1111,107 +1123,95 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, /* 2. Program loadgen select */ /* - * Program PORT_TX_DW4_LN depending on Bit rate and used lanes + * Program PORT_TX_DW4 depending on Bit rate and used lanes * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1) * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0) * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ - for (ln = 0; ln <= 3; ln++) { - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); - val &= ~LOADGEN_SELECT; - - if ((rate <= 600000 && width == 4 && ln >= 1) || - (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { - val |= LOADGEN_SELECT; - } - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); + for (ln = 0; ln < 4; ln++) { + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + LOADGEN_SELECT, + icl_combo_phy_loadgen_select(crtc_state, ln)); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); - val |= SUS_CLOCK_CONFIG; - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), + 0, SUS_CLOCK_CONFIG); /* 4. Clear training enable to change swing values */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); val &= ~TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(encoder, crtc_state, level); + icl_ddi_combo_vswing_program(encoder, crtc_state); /* 6. Set training enable to trigger update */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); val |= TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); } -static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) +static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - const struct intel_ddi_buf_trans *ddi_translations; + const struct intel_ddi_buf_trans *trans; int n_entries, ln; - u32 val; - if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) - level = n_entries - 1; - /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port)); - val &= ~CRI_USE_FS32; - intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port)); - val &= ~CRI_USE_FS32; - intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), + CRI_USE_FS32, 0); + intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), + CRI_USE_FS32, 0); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port)); - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; - val |= CRI_TXDEEMPH_OVERRIDE_17_12( - ddi_translations->entries[level].mg.cri_txdeemph_override_17_12); - intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port)); - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; - val |= CRI_TXDEEMPH_OVERRIDE_17_12( - ddi_translations->entries[level].mg.cri_txdeemph_override_17_12); - intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val); + int level; + + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); + + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); + + intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port)); - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); - val |= CRI_TXDEEMPH_OVERRIDE_5_0( - ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) | - CRI_TXDEEMPH_OVERRIDE_11_6( - ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) | - CRI_TXDEEMPH_OVERRIDE_EN; - intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port)); - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); - val |= CRI_TXDEEMPH_OVERRIDE_5_0( - ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) | - CRI_TXDEEMPH_OVERRIDE_11_6( - ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) | - CRI_TXDEEMPH_OVERRIDE_EN; - intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val); + int level; + + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_EN); + + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); + + intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_EN); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } @@ -1222,135 +1222,83 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * values from table 
for which TX1 and TX2 enabled. */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port)); - if (crtc_state->port_clock < 300000) - val |= CFG_LOW_RATE_LKREN_EN; - else - val &= ~CFG_LOW_RATE_LKREN_EN; - intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port), + CFG_LOW_RATE_LKREN_EN, + crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port)); - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (crtc_state->port_clock <= 500000) { - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; - } else { - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); - } - intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port)); - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (crtc_state->port_clock <= 500000) { - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; - } else { - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); - } - intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port), + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | + CFG_AMI_CK_DIV_OVERRIDE_EN, + crtc_state->port_clock > 500000 ? + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); + + intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port), + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | + CFG_AMI_CK_DIV_OVERRIDE_EN, + crtc_state->port_clock > 500000 ? + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, - MG_TX1_PISO_READLOAD(ln, tc_port)); - val |= CRI_CALCINIT; - intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), - val); - - val = intel_de_read(dev_priv, - MG_TX2_PISO_READLOAD(ln, tc_port)); - val |= CRI_CALCINIT; - intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), - val); + intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), + 0, CRI_CALCINIT); + intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), + 0, CRI_CALCINIT); } } -static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - - if (intel_phy_is_combo(dev_priv, phy)) - icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level); - else - icl_mg_phy_ddi_vswing_sequence(encoder, crtc_state, level); -} - -static void -tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) +static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - const struct intel_ddi_buf_trans *ddi_translations; - u32 val, dpcnt_mask, dpcnt_val; + const struct intel_ddi_buf_trans *trans; int n_entries, ln; - if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if 
(drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) - level = n_entries - 1; - - dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK | - DKL_TX_DE_EMPAHSIS_COEFF_MASK | - DKL_TX_VSWING_CONTROL_MASK); - dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations->entries[level].dkl.dkl_vswing_control); - dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations->entries[level].dkl.dkl_de_emphasis_control); - dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations->entries[level].dkl.dkl_preshoot_control); for (ln = 0; ln < 2; ln++) { + int level; + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); - /* All the registers are RMW */ - val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port)); - val &= ~dpcnt_mask; - val |= dpcnt_val; - intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val); - - val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port)); - val &= ~dpcnt_mask; - val |= dpcnt_val; - intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val); - - val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port)); - val &= ~DKL_TX_DP20BITMODE; - intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val); - - if ((intel_crtc_has_dp_encoder(crtc_state) && - crtc_state->port_clock == 162000) || - (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && - crtc_state->port_clock == 594000)) - val |= DKL_TX_LOADGEN_SHARING_PMD_DISABLE; - else - val &= ~DKL_TX_LOADGEN_SHARING_PMD_DISABLE; - } -} + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); -static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int level) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); - if (intel_phy_is_combo(dev_priv, phy)) - icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level); - else - tgl_dkl_phy_ddi_vswing_sequence(encoder, crtc_state, level); + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); + + intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); + + intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), + DKL_TX_DP20BITMODE, 0); + } } static int translate_signal_level(struct intel_dp *intel_dp, @@ -1371,65 +1319,63 @@ static int translate_signal_level(struct intel_dp *intel_dp, return 0; } -static int intel_ddi_dp_level(struct intel_dp *intel_dp) +static int intel_ddi_dp_level(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int lane) { - u8 train_set = intel_dp->train_set[0]; - u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | - DP_TRAIN_PRE_EMPHASIS_MASK); - - return translate_signal_level(intel_dp, signal_levels); -} + u8 train_set = intel_dp->train_set[lane]; -static void -dg2_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_encoder *encoder = 
&dp_to_dig_port(intel_dp)->base; - int level = intel_ddi_dp_level(intel_dp); + if (intel_dp_is_uhbr(crtc_state)) { + return train_set & DP_TX_FFE_PRESET_VALUE_MASK; + } else { + u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); - intel_snps_phy_ddi_vswing_sequence(encoder, level); + return translate_signal_level(intel_dp, signal_levels); + } } -static void -tgl_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +int intel_ddi_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int lane) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - int level = intel_ddi_dp_level(intel_dp); - - tgl_ddi_vswing_sequence(encoder, crtc_state, level); -} + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_ddi_buf_trans *trans; + int level, n_entries; -static void -icl_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - int level = intel_ddi_dp_level(intel_dp); + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&i915->drm, !trans)) + return 0; - icl_ddi_vswing_sequence(encoder, crtc_state, level); -} + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + level = intel_ddi_hdmi_level(encoder, trans); + else + level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state, + lane); -static void -bxt_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - int level = intel_ddi_dp_level(intel_dp); + if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries)) + level = n_entries - 1; - bxt_ddi_vswing_sequence(encoder, crtc_state, level); + return level; } static void -hsw_set_signal_levels(struct intel_dp *intel_dp, +hsw_set_signal_levels(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int level = intel_ddi_dp_level(intel_dp); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + int level = intel_ddi_level(encoder, crtc_state, 0); enum port port = encoder->port; u32 signal_levels; + if (has_iboost(dev_priv)) + skl_ddi_set_iboost(encoder, crtc_state, level); + + /* HDMI ignores the rest */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + return; + signal_levels = DDI_BUF_TRANS_SELECT(level); drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", @@ -1438,9 +1384,6 @@ hsw_set_signal_levels(struct intel_dp *intel_dp, intel_dp->DP &= ~DDI_BUF_EMP_MASK; intel_dp->DP |= signal_levels; - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - skl_ddi_set_iboost(encoder, crtc_state, level); - intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); } @@ -1979,7 +1922,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder, encoder->enable_clock(encoder, crtc_state); } -static void intel_ddi_disable_clock(struct intel_encoder *encoder) +void intel_ddi_disable_clock(struct intel_encoder *encoder) { if (encoder->disable_clock) encoder->disable_clock(encoder); @@ -2059,7 +2002,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, u8 width; if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode == TC_PORT_TBT_ALT) + intel_tc_port_in_tbt_alt_mode(dig_port)) 
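	/*
	 * (Illustrative aside, not part of the patch: the new predicates
	 * are assumed to fold the TC-phy check into the mode compare,
	 * roughly
	 *
	 *	bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
	 *	{
	 *		struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	 *		enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	 *
	 *		return intel_phy_is_tc(i915, phy) &&
	 *		       dig_port->tc_mode == TC_PORT_TBT_ALT;
	 *	}
	 *
	 * which is why "!intel_phy_is_tc(...) || tc_mode == TC_PORT_TBT_ALT"
	 * compounds elsewhere in the patch collapse to a single call.)
	 */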
return; if (DISPLAY_VER(dev_priv) >= 12) { @@ -2084,7 +2027,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, switch (pin_assignment) { case 0x0: drm_WARN_ON(&dev_priv->drm, - dig_port->tc_mode != TC_PORT_LEGACY); + !intel_tc_port_in_legacy_mode(dig_port)); if (width == 1) { ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; } else { @@ -2329,15 +2272,19 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); - int level = intel_ddi_dp_level(intel_dp); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count); /* + * We only configure what the register value will be here. Actual + * enabling happens during link training farther down. + */ + intel_ddi_init_dp_buf_reg(encoder, crtc_state); + + /* * 1. Enable Power Wells * * This was handled at the beginning of intel_atomic_commit_tail(), @@ -2353,8 +2300,7 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_clock(encoder, crtc_state); /* 4. Enable IO power */ - if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) + if (!intel_tc_port_in_tbt_alt_mode(dig_port)) dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); @@ -2374,7 +2320,8 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, */ intel_ddi_enable_pipe_clock(encoder, crtc_state); - /* 5.b Not relevant to i915 for now */ + /* 5.b Configure transcoder for DP 2.0 128b/132b */ + intel_ddi_config_transcoder_dp2(encoder, crtc_state); /* * 5.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST @@ -2391,21 +2338,12 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, */ /* 5.e Configure voltage swing and related IO settings */ - intel_snps_phy_ddi_vswing_sequence(encoder, level); - - /* - * 5.f Configure and enable DDI_BUF_CTL - * 5.g Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout - * after 1200 us. - * - * We only configure what the register value will be here. Actual - * enabling happens during link training farther down. 
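 * (Illustrative aside, not part of the patch: intel_ddi_init_dp_buf_reg()
 * is hoisted to the top of the pre-enable sequence, and the per-platform
 * vswing helpers are replaced by one encoder hook,
 *
 *	void (*set_signal_levels)(struct intel_encoder *encoder,
 *				  const struct intel_crtc_state *crtc_state);
 *
 * assigned once in intel_ddi_init() and invoked here, so the common DP
 * code no longer needs to know which PHY flavour it is driving.)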
- */ - intel_ddi_init_dp_buf_reg(encoder, crtc_state); + encoder->set_signal_levels(encoder, crtc_state); if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); + intel_dp_configure_protocol_converter(intel_dp, crtc_state); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); /* * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit @@ -2413,6 +2351,8 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, * training */ intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + intel_dp_check_frl_training(intel_dp); + intel_dp_pcon_dsc_configure(intel_dp, crtc_state); /* * 5.h Follow DisplayPort specification training sequence (see notes for @@ -2429,7 +2369,10 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 5.k Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); - intel_dsc_enable(encoder, crtc_state); + + intel_dsc_dp_pps_write(encoder, crtc_state); + + intel_dsc_enable(crtc_state); } static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2439,16 +2382,20 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); - int level = intel_ddi_dp_level(intel_dp); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count); /* + * We only configure what the register value will be here. Actual + * enabling happens during link training farther down. + */ + intel_ddi_init_dp_buf_reg(encoder, crtc_state); + + /* * 1. Enable Power Wells * * This was handled at the beginning of intel_atomic_commit_tail(), @@ -2476,8 +2423,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_clock(encoder, crtc_state); /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */ - if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) { + if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); @@ -2517,7 +2463,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, */ /* 7.e Configure voltage swing and related IO settings */ - tgl_ddi_vswing_sequence(encoder, crtc_state, level); + encoder->set_signal_levels(encoder, crtc_state); /* * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up @@ -2530,16 +2476,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, */ intel_ddi_mso_configure(crtc_state); - /* - * 7.g Configure and enable DDI_BUF_CTL - * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout - * after 500 us. - * - * We only configure what the register value will be here. Actual - * enabling happens during link training farther down. 
- */ - intel_ddi_init_dp_buf_reg(encoder, crtc_state); - if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); @@ -2570,8 +2506,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); + + intel_dsc_dp_pps_write(encoder, crtc_state); + if (!crtc_state->bigjoiner) - intel_dsc_enable(encoder, crtc_state); + intel_dsc_enable(crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2582,10 +2521,8 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - enum phy phy = intel_port_to_phy(dev_priv, port); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); - int level = intel_ddi_dp_level(intel_dp); if (DISPLAY_VER(dev_priv) < 11) drm_WARN_ON(&dev_priv->drm, @@ -2597,12 +2534,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, crtc_state->port_clock, crtc_state->lane_count); + /* + * We only configure what the register value will be here. Actual + * enabling happens during link training farther down. + */ + intel_ddi_init_dp_buf_reg(encoder, crtc_state); + intel_pps_on(intel_dp); intel_ddi_enable_clock(encoder, crtc_state); - if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) { + if (!intel_tc_port_in_tbt_alt_mode(dig_port)) { drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); @@ -2610,16 +2552,13 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, icl_program_mg_dp_mode(dig_port, crtc_state); - if (DISPLAY_VER(dev_priv) >= 11) - icl_ddi_vswing_sequence(encoder, crtc_state, level); - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_ddi_vswing_sequence(encoder, crtc_state, level); - else + if (has_buf_trans_select(dev_priv)) hsw_prepare_dp_ddi_buffers(encoder, crtc_state); + encoder->set_signal_levels(encoder, crtc_state); + intel_ddi_power_up_lanes(encoder, crtc_state); - intel_ddi_init_dp_buf_reg(encoder, crtc_state); if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); intel_dp_configure_protocol_converter(intel_dp, crtc_state); @@ -2636,8 +2575,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!is_mst) intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_dsc_dp_pps_write(encoder, crtc_state); + if (!crtc_state->bigjoiner) - intel_dsc_enable(encoder, crtc_state); + intel_dsc_enable(crtc_state); } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2772,7 +2713,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, struct intel_dp *intel_dp = &dig_port->dp; bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (!is_mst) intel_dp_set_infoframes(encoder, false, @@ -2815,8 +2755,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, intel_pps_vdd_on(intel_dp); intel_pps_off(intel_dp); - if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) + if (!intel_tc_port_in_tbt_alt_mode(dig_port)) intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, fetch_and_zero(&dig_port->ddi_io_wakeref)); @@ -2862,7 +2801,7 @@ static void 
intel_ddi_post_disable(struct intel_atomic_state *state, if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { intel_crtc_vblank_off(old_crtc_state); - intel_disable_pipe(old_crtc_state); + intel_disable_transcoder(old_crtc_state); intel_vrr_disable(old_crtc_state); @@ -2877,12 +2816,10 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, } if (old_crtc_state->bigjoiner_linked_crtc) { - struct intel_atomic_state *state = - to_intel_atomic_state(old_crtc_state->uapi.state); - struct intel_crtc *slave = + struct intel_crtc *slave_crtc = old_crtc_state->bigjoiner_linked_crtc; const struct intel_crtc_state *old_slave_crtc_state = - intel_atomic_get_old_crtc_state(state, slave); + intel_atomic_get_old_crtc_state(state, slave_crtc); intel_crtc_vblank_off(old_slave_crtc_state); @@ -2919,41 +2856,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, intel_tc_port_put_link(dig_port); } -void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; - - /* - * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) - * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, - * step 13 is the correct place for it. Step 18 is where it was - * originally before the BUN. - */ - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - intel_disable_ddi_buf(encoder, old_crtc_state); - intel_ddi_disable_clock(encoder); - - val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_PCDCLK; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_PLL_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); -} - static void trans_port_sync_stop_link_train(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) @@ -3004,13 +2906,13 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, if (port == PORT_A && DISPLAY_VER(dev_priv) < 9) intel_dp_stop_link_train(intel_dp, crtc_state); + drm_connector_update_privacy_screen(conn_state); intel_edp_backlight_on(crtc_state, conn_state); - intel_psr_enable(intel_dp, crtc_state, conn_state); if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink) intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); - intel_edp_drrs_enable(intel_dp, crtc_state); + intel_drrs_enable(intel_dp, crtc_state); if (crtc_state->has_audio) intel_audio_codec_enable(encoder, crtc_state, conn_state); @@ -3046,7 +2948,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_connector *connector = conn_state->connector; - int level = intel_ddi_hdmi_level(encoder, crtc_state); enum port port = encoder->port; if (!intel_hdmi_handle_sink_scrambling(encoder, connector, @@ -3056,19 +2957,10 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS 
bit clock ratio\n", connector->base.id, connector->name); - if (IS_DG2(dev_priv)) - intel_snps_phy_ddi_vswing_sequence(encoder, U32_MAX); - else if (DISPLAY_VER(dev_priv) >= 12) - tgl_ddi_vswing_sequence(encoder, crtc_state, level); - else if (DISPLAY_VER(dev_priv) == 11) - icl_ddi_vswing_sequence(encoder, crtc_state, level); - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - bxt_ddi_vswing_sequence(encoder, crtc_state, level); - else - hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state, level); + if (has_buf_trans_select(dev_priv)) + hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state); - if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - skl_ddi_set_iboost(encoder, crtc_state, level); + encoder->set_signal_levels(encoder, crtc_state); /* Display WA #1143: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { @@ -3133,7 +3025,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state, intel_vrr_enable(encoder, crtc_state); - intel_enable_pipe(crtc_state); + intel_enable_transcoder(crtc_state); intel_crtc_vblank_on(crtc_state); @@ -3159,6 +3051,12 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state, intel_dp->link_trained = false; + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + + intel_drrs_disable(intel_dp, old_crtc_state); + intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); /* Disable the decompression in DP Sink */ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, @@ -3176,6 +3074,10 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = old_conn_state->connector; + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + if (!intel_hdmi_handle_sink_scrambling(encoder, connector, false, false)) drm_dbg_kms(&i915->drm, @@ -3183,25 +3085,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, connector->base.id, connector->name); } -static void intel_pre_disable_ddi(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - struct intel_dp *intel_dp; - - if (old_crtc_state->has_audio) - intel_audio_codec_disable(encoder, old_crtc_state, - old_conn_state); - - if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) - return; - - intel_dp = enc_to_intel_dp(encoder); - intel_edp_drrs_disable(intel_dp, old_crtc_state); - intel_psr_disable(intel_dp, old_crtc_state); -} - static void intel_disable_ddi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -3226,11 +3109,11 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state, intel_ddi_set_dp_msa(crtc_state, conn_state); - intel_psr_update(intel_dp, crtc_state, conn_state); intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); - intel_edp_drrs_update(intel_dp, crtc_state); + intel_drrs_update(intel_dp, crtc_state); - intel_panel_update_backlight(state, encoder, crtc_state, conn_state); + intel_backlight_update(state, encoder, crtc_state, conn_state); + drm_connector_update_privacy_screen(conn_state); } void intel_ddi_update_pipe(struct intel_atomic_state *state, @@ -3260,8 +3143,14 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, intel_tc_port_get_link(enc_to_dig_port(encoder), 
required_lanes); - if (crtc_state && crtc_state->hw.active) + if (crtc_state && crtc_state->hw.active) { + struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc; + intel_update_active_dpll(state, crtc, encoder); + + if (slave_crtc) + intel_update_active_dpll(state, slave_crtc, encoder); + } } static void @@ -3293,7 +3182,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, intel_ddi_main_link_aux_domain(dig_port)); } - if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT) + if (is_tc_port && !intel_tc_port_in_tbt_alt_mode(dig_port)) /* * Program the lane count for static/dynamic connections on * Type-C ports. Skip this step for TBT. @@ -3553,9 +3442,6 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); pipe_config->lane_count = 4; break; - case TRANS_DDI_MODE_SELECT_FDI: - pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); - break; case TRANS_DDI_MODE_SELECT_DP_SST: if (encoder->type == INTEL_OUTPUT_EDP) pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); @@ -3584,6 +3470,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); break; + case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B: + if (!HAS_DP20(dev_priv)) { + /* FDI */ + pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); + break; + } + fallthrough; /* 128b/132b */ case TRANS_DDI_MODE_SELECT_DP_MST: pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); pipe_config->lane_count = @@ -3613,18 +3506,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) return; - if (pipe_config->bigjoiner_slave) { - /* read out pipe settings from master */ - enum transcoder save = pipe_config->cpu_transcoder; - - /* Our own transcoder needs to be disabled when reading it in intel_ddi_read_func_ctl() */ - WARN_ON(pipe_config->output_types); - pipe_config->cpu_transcoder = (enum transcoder)pipe_config->bigjoiner_linked_crtc->pipe; - intel_ddi_read_func_ctl(encoder, pipe_config); - pipe_config->cpu_transcoder = save; - } else { - intel_ddi_read_func_ctl(encoder, pipe_config); - } + intel_ddi_read_func_ctl(encoder, pipe_config); intel_ddi_mso_get_config(encoder, pipe_config); @@ -3652,8 +3534,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } - if (!pipe_config->bigjoiner_slave) - ddi_dotclock_get(pipe_config); + ddi_dotclock_get(pipe_config); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = @@ -3807,7 +3688,13 @@ void hsw_ddi_get_config(struct intel_encoder *encoder, static void intel_ddi_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - if (intel_crtc_has_dp_encoder(crtc_state)) + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_phy_is_tc(i915, phy)) + intel_tc_port_sanitize(enc_to_dig_port(encoder)); + + if (crtc_state && intel_crtc_has_dp_encoder(crtc_state)) intel_dp_sync_state(encoder, crtc_state); } @@ -3989,13 +3876,15 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->dev); struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); intel_dp_encoder_flush_work(encoder); + if 
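	/*
	 * (Illustrative aside, not part of the patch: destroy, suspend and
	 * shutdown now all funnel through intel_tc_port_flush_work() rather
	 * than intel_tc_port_disconnect_phy(); a sketch assuming the TC
	 * code keeps a work item:
	 *
	 *	void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
	 *	{
	 *		flush_work(&dig_port->tc_work);	// hypothetical field
	 *	}
	 *
	 * so pending TC-port work finishes before the port is torn down.)
	 */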
(intel_phy_is_tc(i915, phy)) + intel_tc_port_flush_work(dig_port); intel_display_power_flush_work(i915); drm_encoder_cleanup(encoder); - if (dig_port) - kfree(dig_port->hdcp_port_data.streams); + kfree(dig_port->hdcp_port_data.streams); kfree(dig_port); } @@ -4016,7 +3905,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { static struct intel_connector * intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); struct intel_connector *connector; enum port port = dig_port->base.port; @@ -4029,17 +3917,6 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) dig_port->dp.set_link_train = intel_ddi_set_link_train; dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train; - if (IS_DG2(dev_priv)) - dig_port->dp.set_signal_levels = dg2_set_signal_levels; - else if (DISPLAY_VER(dev_priv) >= 12) - dig_port->dp.set_signal_levels = tgl_set_signal_levels; - else if (DISPLAY_VER(dev_priv) >= 11) - dig_port->dp.set_signal_levels = icl_set_signal_levels; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dig_port->dp.set_signal_levels = bxt_set_signal_levels; - else - dig_port->dp.set_signal_levels = hsw_set_signal_levels; - dig_port->dp.voltage_max = intel_ddi_dp_voltage_max; dig_port->dp.preemph_max = intel_ddi_dp_preemph_max; @@ -4048,6 +3925,19 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) return NULL; } + if (dig_port->base.type == INTEL_OUTPUT_EDP) { + struct drm_device *dev = dig_port->base.base.dev; + struct drm_privacy_screen *privacy_screen; + + privacy_screen = drm_privacy_screen_get(dev->dev, NULL); + if (!IS_ERR(privacy_screen)) { + drm_connector_attach_privacy_screen_provider(&connector->base, + privacy_screen); + } else if (PTR_ERR(privacy_screen) != -ENODEV) { + drm_warn(dev, "Error getting privacy-screen\n"); + } + } + return connector; } @@ -4415,7 +4305,7 @@ static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) if (!intel_phy_is_tc(i915, phy)) return; - intel_tc_port_disconnect_phy(dig_port); + intel_tc_port_flush_work(dig_port); } static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) @@ -4426,11 +4316,12 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(i915, encoder->port); intel_dp_encoder_shutdown(encoder); + intel_hdmi_encoder_shutdown(encoder); if (!intel_phy_is_tc(i915, phy)) return; - intel_tc_port_disconnect_phy(dig_port); + intel_tc_port_flush_work(dig_port); } #define port_tc_name(port) ((port) - PORT_TC1 + '1') @@ -4536,7 +4427,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->enable = intel_enable_ddi; encoder->pre_pll_enable = intel_ddi_pre_pll_enable; encoder->pre_enable = intel_ddi_pre_enable; - encoder->pre_disable = intel_pre_disable_ddi; encoder->disable = intel_disable_ddi; encoder->post_disable = intel_ddi_post_disable; encoder->update_pipe = intel_ddi_update_pipe; @@ -4611,6 +4501,24 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->get_config = hsw_ddi_get_config; } + if (IS_DG2(dev_priv)) { + encoder->set_signal_levels = intel_snps_phy_set_signal_levels; + } else if (DISPLAY_VER(dev_priv) >= 12) { + if (intel_phy_is_combo(dev_priv, phy)) + encoder->set_signal_levels = icl_combo_phy_set_signal_levels; + else + encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels; + } else if (DISPLAY_VER(dev_priv) >= 11) { + if (intel_phy_is_combo(dev_priv, phy)) + 
encoder->set_signal_levels = icl_combo_phy_set_signal_levels; + else + encoder->set_signal_levels = icl_mg_phy_set_signal_levels; + } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels; + } else { + encoder->set_signal_levels = hsw_set_signal_levels; + } + intel_ddi_buf_trans_init(encoder); if (DISPLAY_VER(dev_priv) >= 13) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 7d448485d887..c2fea6562917 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -6,11 +6,11 @@ #ifndef __INTEL_DDI_H__ #define __INTEL_DDI_H__ -#include "intel_display.h" #include "i915_reg.h" struct drm_connector_state; struct drm_i915_private; +struct intel_atomic_state; struct intel_connector; struct intel_crtc; struct intel_crtc_state; @@ -18,6 +18,8 @@ struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; struct intel_shared_dpll; +enum pipe; +enum port; enum transcoder; i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, @@ -30,6 +32,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state); void intel_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void intel_ddi_disable_clock(struct intel_encoder *encoder); void intel_ddi_get_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct intel_shared_dpll *pll); @@ -59,13 +62,12 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, bool state); void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); -u32 bxt_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -u32 ddi_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, enum transcoder cpu_transcoder, bool enable, u32 hdcp_mask); void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); +int intel_ddi_level(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int lane); #endif /* __INTEL_DDI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index ba2c08f1a797..1e689d573512 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -8,12 +8,13 @@ #include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dp.h" /* HDMI/DVI modes ignore everything but the last 2 items. 
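 * (Illustrative aside, not part of the patch: the ddi_translations ->
 * trans rename below is mechanical; each table pair keeps the shape
 *
 *	static const struct intel_ddi_buf_trans hsw_trans_dp = {
 *		.entries = _hsw_trans_dp,
 *		.num_entries = ARRAY_SIZE(_hsw_trans_dp),
 *	};
 *
 * and callers index one entry per computed level, e.g.
 *
 *	iboost = trans->entries[level].hsw.i_boost;
 *
 * with intel_ddi_level() clamping level to num_entries - 1.)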
So we share * them for both DP and FDI transports, allowing those ports to * automatically adapt to HDMI connections as well */ -static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _hsw_trans_dp[] = { { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } }, { .hsw = { 0x00C30FFF, 0x00040006, 0x0 } }, @@ -25,12 +26,12 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = { { .hsw = { 0x80D75FFF, 0x000B0000, 0x0 } }, }; -static const struct intel_ddi_buf_trans hsw_ddi_translations_dp = { - .entries = _hsw_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_hsw_ddi_translations_dp), +static const struct intel_ddi_buf_trans hsw_trans_dp = { + .entries = _hsw_trans_dp, + .num_entries = ARRAY_SIZE(_hsw_trans_dp), }; -static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = { +static const union intel_ddi_buf_trans_entry _hsw_trans_fdi[] = { { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x000F000A, 0x0 } }, { .hsw = { 0x00C30FFF, 0x00060006, 0x0 } }, @@ -42,12 +43,12 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = { { .hsw = { 0x00D75FFF, 0x001E0000, 0x0 } }, }; -static const struct intel_ddi_buf_trans hsw_ddi_translations_fdi = { - .entries = _hsw_ddi_translations_fdi, - .num_entries = ARRAY_SIZE(_hsw_ddi_translations_fdi), +static const struct intel_ddi_buf_trans hsw_trans_fdi = { + .entries = _hsw_trans_fdi, + .num_entries = ARRAY_SIZE(_hsw_trans_fdi), }; -static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _hsw_trans_hdmi[] = { /* Idx NT mV d T mV d db */ { .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, /* 0: 400 400 0 */ { .hsw = { 0x00E79FFF, 0x000E000C, 0x0 } }, /* 1: 400 500 2 */ @@ -63,13 +64,13 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = { { .hsw = { 0x80FFFFFF, 0x00030002, 0x0 } }, /* 11: 1000 1000 0 */ }; -static const struct intel_ddi_buf_trans hsw_ddi_translations_hdmi = { - .entries = _hsw_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_hsw_ddi_translations_hdmi), +static const struct intel_ddi_buf_trans hsw_trans_hdmi = { + .entries = _hsw_trans_hdmi, + .num_entries = ARRAY_SIZE(_hsw_trans_hdmi), .hdmi_default_entry = 6, }; -static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = { +static const union intel_ddi_buf_trans_entry _bdw_trans_edp[] = { { .hsw = { 0x00FFFFFF, 0x00000012, 0x0 } }, { .hsw = { 0x00EBAFFF, 0x00020011, 0x0 } }, { .hsw = { 0x00C71FFF, 0x0006000F, 0x0 } }, @@ -81,12 +82,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = { { .hsw = { 0x00DB6FFF, 0x000A000C, 0x0 } }, }; -static const struct intel_ddi_buf_trans bdw_ddi_translations_edp = { - .entries = _bdw_ddi_translations_edp, - .num_entries = ARRAY_SIZE(_bdw_ddi_translations_edp), +static const struct intel_ddi_buf_trans bdw_trans_edp = { + .entries = _bdw_trans_edp, + .num_entries = ARRAY_SIZE(_bdw_trans_edp), }; -static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _bdw_trans_dp[] = { { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, { .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } }, @@ -98,12 +99,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = { { .hsw = { 0x80D75FFF, 0x001B0002, 0x0 } }, }; -static const struct intel_ddi_buf_trans 
bdw_ddi_translations_dp = { - .entries = _bdw_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_bdw_ddi_translations_dp), +static const struct intel_ddi_buf_trans bdw_trans_dp = { + .entries = _bdw_trans_dp, + .num_entries = ARRAY_SIZE(_bdw_trans_dp), }; -static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = { +static const union intel_ddi_buf_trans_entry _bdw_trans_fdi[] = { { .hsw = { 0x00FFFFFF, 0x0001000E, 0x0 } }, { .hsw = { 0x00D75FFF, 0x0004000A, 0x0 } }, { .hsw = { 0x00C30FFF, 0x00070006, 0x0 } }, @@ -115,12 +116,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = { { .hsw = { 0x00D75FFF, 0x000C0000, 0x0 } }, }; -static const struct intel_ddi_buf_trans bdw_ddi_translations_fdi = { - .entries = _bdw_ddi_translations_fdi, - .num_entries = ARRAY_SIZE(_bdw_ddi_translations_fdi), +static const struct intel_ddi_buf_trans bdw_trans_fdi = { + .entries = _bdw_trans_fdi, + .num_entries = ARRAY_SIZE(_bdw_trans_fdi), }; -static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _bdw_trans_hdmi[] = { /* Idx NT mV d T mV df db */ { .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, /* 0: 400 400 0 */ { .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, /* 1: 400 600 3.5 */ @@ -134,14 +135,14 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = { { .hsw = { 0x80FFFFFF, 0x001B0002, 0x0 } }, /* 9: 1000 1000 0 */ }; -static const struct intel_ddi_buf_trans bdw_ddi_translations_hdmi = { - .entries = _bdw_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_bdw_ddi_translations_hdmi), +static const struct intel_ddi_buf_trans bdw_trans_hdmi = { + .entries = _bdw_trans_hdmi, + .num_entries = ARRAY_SIZE(_bdw_trans_hdmi), .hdmi_default_entry = 7, }; /* Skylake H and S */ -static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _skl_trans_dp[] = { { .hsw = { 0x00002016, 0x000000A0, 0x0 } }, { .hsw = { 0x00005012, 0x0000009B, 0x0 } }, { .hsw = { 0x00007011, 0x00000088, 0x0 } }, @@ -153,13 +154,13 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = { { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, }; -static const struct intel_ddi_buf_trans skl_ddi_translations_dp = { - .entries = _skl_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_skl_ddi_translations_dp), +static const struct intel_ddi_buf_trans skl_trans_dp = { + .entries = _skl_trans_dp, + .num_entries = ARRAY_SIZE(_skl_trans_dp), }; /* Skylake U */ -static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _skl_u_trans_dp[] = { { .hsw = { 0x0000201B, 0x000000A2, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000CD, 0x1 } }, @@ -171,13 +172,13 @@ static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = { { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, }; -static const struct intel_ddi_buf_trans skl_u_ddi_translations_dp = { - .entries = _skl_u_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_dp), +static const struct intel_ddi_buf_trans skl_u_trans_dp = { + .entries = _skl_u_trans_dp, + .num_entries = ARRAY_SIZE(_skl_u_trans_dp), }; /* Skylake Y */ -static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _skl_y_trans_dp[] = { { .hsw = { 0x00000018, 0x000000A2, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 
0x000000CD, 0x3 } }, @@ -189,13 +190,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = { { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, }; -static const struct intel_ddi_buf_trans skl_y_ddi_translations_dp = { - .entries = _skl_y_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_dp), +static const struct intel_ddi_buf_trans skl_y_trans_dp = { + .entries = _skl_y_trans_dp, + .num_entries = ARRAY_SIZE(_skl_y_trans_dp), }; /* Kabylake H and S */ -static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _kbl_trans_dp[] = { { .hsw = { 0x00002016, 0x000000A0, 0x0 } }, { .hsw = { 0x00005012, 0x0000009B, 0x0 } }, { .hsw = { 0x00007011, 0x00000088, 0x0 } }, @@ -207,13 +208,13 @@ static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = { { .hsw = { 0x80005012, 0x000000C0, 0x1 } }, }; -static const struct intel_ddi_buf_trans kbl_ddi_translations_dp = { - .entries = _kbl_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_kbl_ddi_translations_dp), +static const struct intel_ddi_buf_trans kbl_trans_dp = { + .entries = _kbl_trans_dp, + .num_entries = ARRAY_SIZE(_kbl_trans_dp), }; /* Kabylake U */ -static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _kbl_u_trans_dp[] = { { .hsw = { 0x0000201B, 0x000000A1, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, @@ -225,13 +226,13 @@ static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = { { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, }; -static const struct intel_ddi_buf_trans kbl_u_ddi_translations_dp = { - .entries = _kbl_u_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_kbl_u_ddi_translations_dp), +static const struct intel_ddi_buf_trans kbl_u_trans_dp = { + .entries = _kbl_u_trans_dp, + .num_entries = ARRAY_SIZE(_kbl_u_trans_dp), }; /* Kabylake Y */ -static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _kbl_y_trans_dp[] = { { .hsw = { 0x00001017, 0x000000A1, 0x0 } }, { .hsw = { 0x00005012, 0x00000088, 0x0 } }, { .hsw = { 0x80007011, 0x000000CD, 0x3 } }, @@ -243,16 +244,16 @@ static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = { { .hsw = { 0x80005012, 0x000000C0, 0x3 } }, }; -static const struct intel_ddi_buf_trans kbl_y_ddi_translations_dp = { - .entries = _kbl_y_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_kbl_y_ddi_translations_dp), +static const struct intel_ddi_buf_trans kbl_y_trans_dp = { + .entries = _kbl_y_trans_dp, + .num_entries = ARRAY_SIZE(_kbl_y_trans_dp), }; /* * Skylake/Kabylake H and S * eDP 1.4 low vswing translation parameters */ -static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = { +static const union intel_ddi_buf_trans_entry _skl_trans_edp[] = { { .hsw = { 0x00000018, 0x000000A8, 0x0 } }, { .hsw = { 0x00004013, 0x000000A9, 0x0 } }, { .hsw = { 0x00007011, 0x000000A2, 0x0 } }, @@ -265,16 +266,16 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = { { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, }; -static const struct intel_ddi_buf_trans skl_ddi_translations_edp = { - .entries = _skl_ddi_translations_edp, - .num_entries = ARRAY_SIZE(_skl_ddi_translations_edp), +static const struct intel_ddi_buf_trans skl_trans_edp = { + .entries = _skl_trans_edp, + .num_entries = ARRAY_SIZE(_skl_trans_edp), }; /* * Skylake/Kabylake U * eDP 1.4 
low vswing translation parameters */ -static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = { +static const union intel_ddi_buf_trans_entry _skl_u_trans_edp[] = { { .hsw = { 0x00000018, 0x000000A8, 0x0 } }, { .hsw = { 0x00004013, 0x000000A9, 0x0 } }, { .hsw = { 0x00007011, 0x000000A2, 0x0 } }, @@ -287,16 +288,16 @@ static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = { { .hsw = { 0x00000018, 0x000000DF, 0x0 } }, }; -static const struct intel_ddi_buf_trans skl_u_ddi_translations_edp = { - .entries = _skl_u_ddi_translations_edp, - .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_edp), +static const struct intel_ddi_buf_trans skl_u_trans_edp = { + .entries = _skl_u_trans_edp, + .num_entries = ARRAY_SIZE(_skl_u_trans_edp), }; /* * Skylake/Kabylake Y * eDP 1.4 low vswing translation parameters */ -static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = { +static const union intel_ddi_buf_trans_entry _skl_y_trans_edp[] = { { .hsw = { 0x00000018, 0x000000A8, 0x0 } }, { .hsw = { 0x00004013, 0x000000AB, 0x0 } }, { .hsw = { 0x00007011, 0x000000A4, 0x0 } }, @@ -309,13 +310,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = { { .hsw = { 0x00000018, 0x0000008A, 0x0 } }, }; -static const struct intel_ddi_buf_trans skl_y_ddi_translations_edp = { - .entries = _skl_y_ddi_translations_edp, - .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_edp), +static const struct intel_ddi_buf_trans skl_y_trans_edp = { + .entries = _skl_y_trans_edp, + .num_entries = ARRAY_SIZE(_skl_y_trans_edp), }; /* Skylake/Kabylake U, H and S */ -static const union intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _skl_trans_hdmi[] = { { .hsw = { 0x00000018, 0x000000AC, 0x0 } }, { .hsw = { 0x00005012, 0x0000009D, 0x0 } }, { .hsw = { 0x00007011, 0x00000088, 0x0 } }, @@ -329,14 +330,14 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = { { .hsw = { 0x80000018, 0x000000C0, 0x1 } }, }; -static const struct intel_ddi_buf_trans skl_ddi_translations_hdmi = { - .entries = _skl_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_skl_ddi_translations_hdmi), +static const struct intel_ddi_buf_trans skl_trans_hdmi = { + .entries = _skl_trans_hdmi, + .num_entries = ARRAY_SIZE(_skl_trans_hdmi), .hdmi_default_entry = 8, }; /* Skylake/Kabylake Y */ -static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _skl_y_trans_hdmi[] = { { .hsw = { 0x00000018, 0x000000A1, 0x0 } }, { .hsw = { 0x00005012, 0x000000DF, 0x0 } }, { .hsw = { 0x80007011, 0x000000CB, 0x3 } }, @@ -350,13 +351,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = { { .hsw = { 0x80000018, 0x000000C0, 0x3 } }, }; -static const struct intel_ddi_buf_trans skl_y_ddi_translations_hdmi = { - .entries = _skl_y_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_hdmi), +static const struct intel_ddi_buf_trans skl_y_trans_hdmi = { + .entries = _skl_y_trans_hdmi, + .num_entries = ARRAY_SIZE(_skl_y_trans_hdmi), .hdmi_default_entry = 8, }; -static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _bxt_trans_dp[] = { /* Idx NT mV diff db */ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */ { .bxt = { 78, 0x9A, 0, 85, } }, /* 1: 400 3.5 */ @@ -370,12 +371,12 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = { { 
.bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */ }; -static const struct intel_ddi_buf_trans bxt_ddi_translations_dp = { - .entries = _bxt_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_bxt_ddi_translations_dp), +static const struct intel_ddi_buf_trans bxt_trans_dp = { + .entries = _bxt_trans_dp, + .num_entries = ARRAY_SIZE(_bxt_trans_dp), }; -static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = { +static const union intel_ddi_buf_trans_entry _bxt_trans_edp[] = { /* Idx NT mV diff db */ { .bxt = { 26, 0, 0, 128, } }, /* 0: 200 0 */ { .bxt = { 38, 0, 0, 112, } }, /* 1: 200 1.5 */ @@ -389,15 +390,15 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = { { .bxt = { 48, 0, 0, 128, } }, /* 9: 300 0 */ }; -static const struct intel_ddi_buf_trans bxt_ddi_translations_edp = { - .entries = _bxt_ddi_translations_edp, - .num_entries = ARRAY_SIZE(_bxt_ddi_translations_edp), +static const struct intel_ddi_buf_trans bxt_trans_edp = { + .entries = _bxt_trans_edp, + .num_entries = ARRAY_SIZE(_bxt_trans_edp), }; /* BSpec has 2 recommended values - entries 0 and 8. * Using the entry with higher vswing. */ -static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _bxt_trans_hdmi[] = { /* Idx NT mV diff db */ { .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */ { .bxt = { 52, 0x9A, 0, 85, } }, /* 1: 400 3.5 */ @@ -411,14 +412,14 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = { { .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */ }; -static const struct intel_ddi_buf_trans bxt_ddi_translations_hdmi = { - .entries = _bxt_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_bxt_ddi_translations_hdmi), - .hdmi_default_entry = ARRAY_SIZE(_bxt_ddi_translations_hdmi) - 1, +static const struct intel_ddi_buf_trans bxt_trans_hdmi = { + .entries = _bxt_trans_hdmi, + .num_entries = ARRAY_SIZE(_bxt_trans_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_bxt_trans_hdmi) - 1, }; -/* icl_combo_phy_ddi_translations */ -static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3[] = { +/* icl_combo_phy_trans */ +static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_dp_hbr2_edp_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -432,12 +433,12 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3 = { - .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, - .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3), +static const struct intel_ddi_buf_trans icl_combo_phy_trans_dp_hbr2_edp_hbr3 = { + .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3, + .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3), }; -static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp_hbr2[] = { +static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */ @@ -451,12 +452,12 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp { .icl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ }; -static const struct 
intel_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2 = { - .entries = _icl_combo_phy_ddi_translations_edp_hbr2, - .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2), +static const struct intel_ddi_buf_trans icl_combo_phy_trans_edp_hbr2 = { + .entries = _icl_combo_phy_trans_edp_hbr2, + .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2), }; -static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_hdmi[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */ { .icl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */ @@ -467,13 +468,13 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdm { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */ }; -static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi = { - .entries = _icl_combo_phy_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi), - .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi) - 1, +static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = { + .entries = _icl_combo_phy_trans_hdmi, + .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_trans_hdmi) - 1, }; -static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[] = { +static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ @@ -487,12 +488,12 @@ static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_dp = { - .entries = _ehl_combo_phy_ddi_translations_dp, - .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_dp), +static const struct intel_ddi_buf_trans ehl_combo_phy_trans_dp = { + .entries = _ehl_combo_phy_trans_dp, + .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_dp), }; -static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp_hbr2[] = { +static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */ @@ -506,12 +507,12 @@ static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ }; -static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_edp_hbr2 = { - .entries = _ehl_combo_phy_ddi_translations_edp_hbr2, - .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_edp_hbr2), +static const struct intel_ddi_buf_trans ehl_combo_phy_trans_edp_hbr2 = { + .entries = _ehl_combo_phy_trans_edp_hbr2, + .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_edp_hbr2), }; -static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr[] = { +static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */ @@ -525,12 +526,12 @@ static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, 
/* 350 350 0.0 */ }; -static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr = { - .entries = _jsl_combo_phy_ddi_translations_edp_hbr, - .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr), +static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr = { + .entries = _jsl_combo_phy_trans_edp_hbr, + .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr), }; -static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr2[] = { +static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */ { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */ @@ -544,12 +545,12 @@ static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ }; -static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2 = { - .entries = _jsl_combo_phy_ddi_translations_edp_hbr2, - .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr2), +static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr2 = { + .entries = _jsl_combo_phy_trans_edp_hbr2, + .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr2), }; -static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = { +static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_rbr_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */ @@ -563,12 +564,12 @@ static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr = { - .entries = _dg1_combo_phy_ddi_translations_dp_rbr_hbr, - .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_rbr_hbr), +static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_rbr_hbr = { + .entries = _dg1_combo_phy_trans_dp_rbr_hbr, + .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_rbr_hbr), }; -static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { +static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */ @@ -582,12 +583,12 @@ static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3 = { - .entries = _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3, - .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_hbr2_hbr3), +static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_hbr2_hbr3 = { + .entries = _dg1_combo_phy_trans_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_hbr2_hbr3), }; -static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hbr[] = { +static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_rbr_hbr[] = { /* Voltage swing pre-emphasis */ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */ @@ -601,12 +602,12 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hb { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */ }; -static const 
struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr = { - .entries = _icl_mg_phy_ddi_translations_rbr_hbr, - .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_rbr_hbr), +static const struct intel_ddi_buf_trans icl_mg_phy_trans_rbr_hbr = { + .entries = _icl_mg_phy_trans_rbr_hbr, + .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_rbr_hbr), }; -static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_hbr3[] = { +static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hbr2_hbr3[] = { /* Voltage swing pre-emphasis */ { .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */ { .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */ @@ -620,12 +621,12 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_h { .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */ }; -static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3 = { - .entries = _icl_mg_phy_ddi_translations_hbr2_hbr3, - .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hbr2_hbr3), +static const struct intel_ddi_buf_trans icl_mg_phy_trans_hbr2_hbr3 = { + .entries = _icl_mg_phy_trans_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hbr2_hbr3), }; -static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hdmi[] = { /* HDMI Preset VS Pre-emph */ { .mg = { 0x1A, 0x0, 0x0 } }, /* 1 400mV 0dB */ { .mg = { 0x20, 0x0, 0x0 } }, /* 2 500mV 0dB */ @@ -639,13 +640,13 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[] { .mg = { 0x36, 0x0, 0x9 } }, /* 10 Full -3 dB */ }; -static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi = { - .entries = _icl_mg_phy_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi), - .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi) - 1, +static const struct intel_ddi_buf_trans icl_mg_phy_trans_hdmi = { + .entries = _icl_mg_phy_trans_hdmi, + .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_trans_hdmi) - 1, }; -static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr[] = { +static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */ @@ -659,12 +660,12 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hb { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */ }; -static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr = { - .entries = _tgl_dkl_phy_ddi_translations_dp_hbr, - .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr), +static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr = { + .entries = _tgl_dkl_phy_trans_dp_hbr, + .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr), }; -static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr2[] = { +static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr2[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */ @@ -678,12 +679,12 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hb { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */ }; -static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr2 = { - .entries = 
_tgl_dkl_phy_ddi_translations_dp_hbr2, - .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr2), +static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr2 = { + .entries = _tgl_dkl_phy_trans_dp_hbr2, + .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr2), }; -static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_hdmi[] = { /* HDMI Preset VS Pre-emph */ { .dkl = { 0x7, 0x0, 0x0 } }, /* 1 400mV 0dB */ { .dkl = { 0x6, 0x0, 0x0 } }, /* 2 500mV 0dB */ @@ -697,13 +698,13 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[ { .dkl = { 0x0, 0x0, 0xA } }, /* 10 Full -3 dB */ }; -static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_hdmi = { - .entries = _tgl_dkl_phy_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi), - .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi) - 1, +static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_hdmi = { + .entries = _tgl_dkl_phy_trans_hdmi, + .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi) - 1, }; -static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr[] = { +static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -717,12 +718,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr = { - .entries = _tgl_combo_phy_ddi_translations_dp_hbr, - .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr), +static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr = { + .entries = _tgl_combo_phy_trans_dp_hbr, + .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr), }; -static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr2[] = { +static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -736,12 +737,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2 = { - .entries = _tgl_combo_phy_ddi_translations_dp_hbr2, - .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr2), +static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr2 = { + .entries = _tgl_combo_phy_trans_dp_hbr2, + .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr2), }; -static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = { +static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_trans_dp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ @@ -755,16 +756,16 @@ static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2 = { - 
.entries = _tgl_uy_combo_phy_ddi_translations_dp_hbr2, - .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_ddi_translations_dp_hbr2), +static const struct intel_ddi_buf_trans tgl_uy_combo_phy_trans_dp_hbr2 = { + .entries = _tgl_uy_combo_phy_trans_dp_hbr2, + .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_trans_dp_hbr2), }; /* * Cloned the HOBL entry to comply with the voltage and pre-emphasis entries * that DisplayPort specification requires */ -static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = { +static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_edp_hbr2_hobl[] = { /* VS pre-emp */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 0 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 1 */ @@ -777,12 +778,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 1 */ }; -static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl = { - .entries = _tgl_combo_phy_ddi_translations_edp_hbr2_hobl, - .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_edp_hbr2_hobl), +static const struct intel_ddi_buf_trans tgl_combo_phy_trans_edp_hbr2_hobl = { + .entries = _tgl_combo_phy_trans_edp_hbr2_hobl, + .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_edp_hbr2_hobl), }; -static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr[] = { +static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -796,12 +797,12 @@ static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr = { - .entries = _rkl_combo_phy_ddi_translations_dp_hbr, - .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr), +static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr = { + .entries = _rkl_combo_phy_trans_dp_hbr, + .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr), }; -static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { +static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x50, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ @@ -815,12 +816,12 @@ static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3 = { - .entries = _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3, - .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr2_hbr3), +static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr2_hbr3 = { + .entries = _rkl_combo_phy_trans_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr2_hbr3), }; -static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { +static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -834,12 +835,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp { 
.icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_dp_hbr2_hbr3 = { - .entries = _adls_combo_phy_ddi_translations_dp_hbr2_hbr3, - .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_dp_hbr2_hbr3), +static const struct intel_ddi_buf_trans adls_combo_phy_trans_dp_hbr2_hbr3 = { + .entries = _adls_combo_phy_trans_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_dp_hbr2_hbr3), }; -static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr2[] = { +static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr2[] = { /* NT mV Trans mV db */ { .icl = { 0x9, 0x73, 0x3D, 0x00, 0x02 } }, /* 200 200 0.0 */ { .icl = { 0x9, 0x7A, 0x3C, 0x00, 0x03 } }, /* 200 250 1.9 */ @@ -853,12 +854,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_ed { .icl = { 0x4, 0x6C, 0x3A, 0x00, 0x05 } }, /* 350 350 0.0 */ }; -static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr2 = { - .entries = _adls_combo_phy_ddi_translations_edp_hbr2, - .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr2), +static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr2 = { + .entries = _adls_combo_phy_trans_edp_hbr2, + .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr2), }; -static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr3[] = { +static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -872,12 +873,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_ed { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr3 = { - .entries = _adls_combo_phy_ddi_translations_edp_hbr3, - .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr3), +static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr3 = { + .entries = _adls_combo_phy_trans_edp_hbr3, + .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr3), }; -static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_hdmi[] = { +static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_hdmi[] = { /* NT mV Trans mV db */ { .icl = { 0x6, 0x60, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */ { .icl = { 0x6, 0x68, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ @@ -891,13 +892,13 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_hd { .icl = { 0xB, 0x7F, 0x33, 0x00, 0x0C } }, /* Full Red -3.0 */ }; -static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_hdmi = { - .entries = _adlp_combo_phy_ddi_translations_hdmi, - .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi), - .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi) - 1, +static const struct intel_ddi_buf_trans adlp_combo_phy_trans_hdmi = { + .entries = _adlp_combo_phy_trans_hdmi, + .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi), + .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi) - 1, }; -static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr[] = { +static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 
0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -911,12 +912,12 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr = { - .entries = _adlp_combo_phy_ddi_translations_dp_hbr, - .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr), +static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr = { + .entries = _adlp_combo_phy_trans_dp_hbr, + .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr), }; -static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { +static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_hbr3[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */ @@ -930,22 +931,22 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr2_hbr3 = { - .entries = _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3, - .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr2_hbr3), +static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr2_hbr3 = { + .entries = _adlp_combo_phy_trans_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_hbr3), }; -static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_hbr3 = { - .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, - .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3), +static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_hbr3 = { + .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3, + .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3), }; -static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_up_to_hbr2 = { - .entries = _icl_combo_phy_ddi_translations_edp_hbr2, - .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2), +static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_up_to_hbr2 = { + .entries = _icl_combo_phy_trans_edp_hbr2, + .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2), }; -static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr[] = { +static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x01 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x06 } }, /* 0 1 400mV 3.5 dB */ @@ -959,12 +960,12 @@ static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_h { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */ }; -static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr = { - .entries = _adlp_dkl_phy_ddi_translations_dp_hbr, - .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr), +static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr = { + .entries = _adlp_dkl_phy_trans_dp_hbr, + .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr), }; -static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3[] = { +static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr2_hbr3[] = { /* VS pre-emp Non-trans mV Pre-emph dB */ { .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */ { .dkl = { 0x5, 0x0, 0x04 } }, /* 0 1 400mV 3.5 dB */ @@ -978,21 +979,79 
@@ static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_h { .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */ }; -static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3 = { - .entries = _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3, - .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3), +static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr2_hbr3 = { + .entries = _adlp_dkl_phy_trans_dp_hbr2_hbr3, + .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr2_hbr3), +}; + +static const union intel_ddi_buf_trans_entry _dg2_snps_trans[] = { + { .snps = { 26, 0, 0 } }, /* VS 0, pre-emph 0 */ + { .snps = { 33, 0, 6 } }, /* VS 0, pre-emph 1 */ + { .snps = { 38, 0, 12 } }, /* VS 0, pre-emph 2 */ + { .snps = { 43, 0, 19 } }, /* VS 0, pre-emph 3 */ + { .snps = { 39, 0, 0 } }, /* VS 1, pre-emph 0 */ + { .snps = { 44, 0, 8 } }, /* VS 1, pre-emph 1 */ + { .snps = { 47, 0, 15 } }, /* VS 1, pre-emph 2 */ + { .snps = { 52, 0, 0 } }, /* VS 2, pre-emph 0 */ + { .snps = { 51, 0, 10 } }, /* VS 2, pre-emph 1 */ + { .snps = { 62, 0, 0 } }, /* VS 3, pre-emph 0 */ +}; + +static const struct intel_ddi_buf_trans dg2_snps_trans = { + .entries = _dg2_snps_trans, + .num_entries = ARRAY_SIZE(_dg2_snps_trans), + .hdmi_default_entry = ARRAY_SIZE(_dg2_snps_trans) - 1, +}; + +static const union intel_ddi_buf_trans_entry _dg2_snps_trans_uhbr[] = { + { .snps = { 62, 0, 0 } }, /* preset 0 */ + { .snps = { 56, 0, 6 } }, /* preset 1 */ + { .snps = { 51, 0, 11 } }, /* preset 2 */ + { .snps = { 48, 0, 14 } }, /* preset 3 */ + { .snps = { 43, 0, 19 } }, /* preset 4 */ + { .snps = { 59, 3, 0 } }, /* preset 5 */ + { .snps = { 53, 3, 6 } }, /* preset 6 */ + { .snps = { 49, 3, 10 } }, /* preset 7 */ + { .snps = { 45, 3, 14 } }, /* preset 8 */ + { .snps = { 42, 3, 17 } }, /* preset 9 */ + { .snps = { 56, 6, 0 } }, /* preset 10 */ + { .snps = { 50, 6, 6 } }, /* preset 11 */ + { .snps = { 47, 6, 9 } }, /* preset 12 */ + { .snps = { 42, 6, 14 } }, /* preset 13 */ + { .snps = { 46, 8, 8 } }, /* preset 14 */ + { .snps = { 56, 3, 3 } }, /* preset 15 */ +}; + +static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = { + .entries = _dg2_snps_trans_uhbr, + .num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr), }; bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table) { - return table == &tgl_combo_phy_ddi_translations_edp_hbr2_hobl; + return table == &tgl_combo_phy_trans_edp_hbr2_hobl; +} + +static bool use_edp_hobl(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + return i915->vbt.edp.hobl && !intel_dp->hobl_failed; +} + +static bool use_edp_low_vswing(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.edp.low_vswing; } static const struct intel_ddi_buf_trans * -intel_get_buf_trans(const struct intel_ddi_buf_trans *ddi_translations, int *num_entries) +intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries) { - *num_entries = ddi_translations->num_entries; - return ddi_translations; + *num_entries = trans->num_entries; + return trans; } static const struct intel_ddi_buf_trans * @@ -1001,11 +1060,11 @@ hsw_get_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) - return intel_get_buf_trans(&hsw_ddi_translations_fdi, n_entries); + return intel_get_buf_trans(&hsw_trans_fdi, n_entries); else if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&hsw_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&hsw_trans_hdmi, n_entries); else - return intel_get_buf_trans(&hsw_ddi_translations_dp, n_entries); + return intel_get_buf_trans(&hsw_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1013,17 +1072,15 @@ bdw_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) - return intel_get_buf_trans(&bdw_ddi_translations_fdi, n_entries); + return intel_get_buf_trans(&bdw_trans_fdi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&bdw_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&bdw_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return intel_get_buf_trans(&bdw_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return intel_get_buf_trans(&bdw_trans_edp, n_entries); else - return intel_get_buf_trans(&bdw_ddi_translations_dp, n_entries); + return intel_get_buf_trans(&bdw_trans_dp, n_entries); } static int skl_buf_trans_num_entries(enum port port, int n_entries) @@ -1037,12 +1094,12 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries) static const struct intel_ddi_buf_trans * _skl_get_buf_trans_dp(struct intel_encoder *encoder, - const struct intel_ddi_buf_trans *ddi_translations, + const struct intel_ddi_buf_trans *trans, int *n_entries) { - ddi_translations = intel_get_buf_trans(ddi_translations, n_entries); + trans = intel_get_buf_trans(trans, n_entries); *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); - return ddi_translations; + return trans; } static const struct intel_ddi_buf_trans * @@ -1050,15 +1107,13 @@ skl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else - return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_dp, n_entries); + return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1066,15 +1121,13 @@ skl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else - return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_dp, n_entries); + return 
_skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1082,15 +1135,13 @@ skl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else - return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_dp, n_entries); + return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1098,15 +1149,13 @@ kbl_y_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries); else - return _skl_get_buf_trans_dp(encoder, &kbl_y_ddi_translations_dp, n_entries); + return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1114,15 +1163,13 @@ kbl_u_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries); else - return _skl_get_buf_trans_dp(encoder, &kbl_u_ddi_translations_dp, n_entries); + return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1130,15 +1177,13 @@ kbl_get_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&skl_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries); else - return _skl_get_buf_trans_dp(encoder, &kbl_ddi_translations_dp, n_entries); + return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1146,15 +1191,13 @@ bxt_get_buf_trans(struct 
intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&bxt_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&bxt_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - i915->vbt.edp.low_vswing) - return intel_get_buf_trans(&bxt_ddi_translations_edp, n_entries); + use_edp_low_vswing(encoder)) + return intel_get_buf_trans(&bxt_trans_edp, n_entries); else - return intel_get_buf_trans(&bxt_ddi_translations_dp, n_entries); + return intel_get_buf_trans(&bxt_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1162,7 +1205,7 @@ icl_get_combo_buf_trans_dp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); } @@ -1171,13 +1214,11 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (crtc_state->port_clock > 540000) { - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + } else if (use_edp_low_vswing(encoder)) { + return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1190,7 +1231,7 @@ icl_get_combo_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else @@ -1203,10 +1244,10 @@ icl_get_mg_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) { - return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hbr2_hbr3, + return intel_get_buf_trans(&icl_mg_phy_trans_hbr2_hbr3, n_entries); } else { - return intel_get_buf_trans(&icl_mg_phy_ddi_translations_rbr_hbr, + return intel_get_buf_trans(&icl_mg_phy_trans_rbr_hbr, n_entries); } } @@ -1217,7 +1258,7 @@ icl_get_mg_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_mg_phy_trans_hdmi, n_entries); else return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries); } @@ -1228,9 +1269,9 @@ ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) - return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_edp_hbr2, n_entries); + return intel_get_buf_trans(&ehl_combo_phy_trans_edp_hbr2, n_entries); else - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } static const struct intel_ddi_buf_trans * @@ -1238,15 +1279,13 @@ ehl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct 
drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - dev_priv->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else - return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_dp, n_entries); + return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries); } static const struct intel_ddi_buf_trans * @@ -1255,9 +1294,9 @@ jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) - return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr2, n_entries); + return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr2, n_entries); else - return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr, n_entries); + return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr, n_entries); } static const struct intel_ddi_buf_trans * @@ -1265,15 +1304,13 @@ jsl_get_combo_buf_trans(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) && - dev_priv->vbt.edp.low_vswing) + use_edp_low_vswing(encoder)) return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); } static const struct intel_ddi_buf_trans * @@ -1285,14 +1322,14 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, if (crtc_state->port_clock > 270000) { if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { - return intel_get_buf_trans(&tgl_uy_combo_phy_ddi_translations_dp_hbr2, + return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2, n_entries); } else { - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr2, + return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr2, n_entries); } } else { - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr, + return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr, n_entries); } } @@ -1302,17 +1339,14 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + } else if (use_edp_hobl(encoder)) { + return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + } else if (use_edp_low_vswing(encoder)) { + return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1325,7 
+1359,7 @@ tgl_get_combo_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else @@ -1338,10 +1372,10 @@ dg1_get_combo_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) - return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_hbr2_hbr3, + return intel_get_buf_trans(&dg1_combo_phy_trans_dp_hbr2_hbr3, n_entries); else - return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_rbr_hbr, + return intel_get_buf_trans(&dg1_combo_phy_trans_dp_rbr_hbr, n_entries); } @@ -1350,17 +1384,14 @@ dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + else if (use_edp_hobl(encoder)) + return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - else if (dev_priv->vbt.edp.low_vswing) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + else if (use_edp_low_vswing(encoder)) + return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); else return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); @@ -1372,7 +1403,7 @@ dg1_get_combo_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return dg1_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else @@ -1385,9 +1416,9 @@ rkl_get_combo_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) - return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries); + return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr2_hbr3, n_entries); else - return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr, n_entries); + return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr, n_entries); } static const struct intel_ddi_buf_trans * @@ -1395,17 +1426,14 @@ rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, + return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + } else if (use_edp_hobl(encoder)) { + return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if 
(dev_priv->vbt.edp.low_vswing) { - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, + } else if (use_edp_low_vswing(encoder)) { + return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries); } @@ -1418,7 +1446,7 @@ rkl_get_combo_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return rkl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else @@ -1431,9 +1459,9 @@ adls_get_combo_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) - return intel_get_buf_trans(&adls_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries); + return intel_get_buf_trans(&adls_combo_phy_trans_dp_hbr2_hbr3, n_entries); else - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr, n_entries); + return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr, n_entries); } static const struct intel_ddi_buf_trans * @@ -1441,15 +1469,12 @@ adls_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) - return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr3, n_entries); - else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed) - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, n_entries); - else if (i915->vbt.edp.low_vswing) - return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr2, n_entries); + return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries); + else if (use_edp_hobl(encoder)) + return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); + else if (use_edp_low_vswing(encoder)) + return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries); else return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); } @@ -1460,7 +1485,7 @@ adls_get_combo_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return adls_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else @@ -1473,9 +1498,9 @@ adlp_get_combo_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) - return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries); + return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr2_hbr3, n_entries); else - return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr, n_entries); + return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr, n_entries); } static const struct intel_ddi_buf_trans * @@ -1483,17 +1508,14 @@ adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (crtc_state->port_clock > 540000) { - return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_hbr3, + return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3, 
n_entries); - } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) { - return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, + } else if (use_edp_hobl(encoder)) { + return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries); - } else if (dev_priv->vbt.edp.low_vswing) { - return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_up_to_hbr2, + } else if (use_edp_low_vswing(encoder)) { + return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2, n_entries); } @@ -1506,7 +1528,7 @@ adlp_get_combo_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&adlp_combo_phy_trans_hdmi, n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries); else @@ -1519,10 +1541,10 @@ tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) { - return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr2, + return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr2, n_entries); } else { - return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr, + return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr, n_entries); } } @@ -1533,7 +1555,7 @@ tgl_get_dkl_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries); else return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); } @@ -1544,10 +1566,10 @@ adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { if (crtc_state->port_clock > 270000) { - return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3, + return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr2_hbr3, n_entries); } else { - return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr, + return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr, n_entries); } } @@ -1558,29 +1580,21 @@ adlp_get_dkl_buf_trans(struct intel_encoder *encoder, int *n_entries) { if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries); + return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries); else return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries); } -int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *default_entry) +static const struct intel_ddi_buf_trans * +dg2_get_snps_buf_trans(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int *n_entries) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_ddi_buf_trans *ddi_translations; - int n_entries; - - ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries); - - if (drm_WARN_ON(&dev_priv->drm, !ddi_translations)) { - *default_entry = 0; - return 0; - } - - *default_entry = ddi_translations->hdmi_default_entry; - - return n_entries; + if (intel_crtc_has_dp_encoder(crtc_state) && + intel_dp_is_uhbr(crtc_state)) + return intel_get_buf_trans(&dg2_snps_trans_uhbr, n_entries); + else + return intel_get_buf_trans(&dg2_snps_trans, n_entries); } void intel_ddi_buf_trans_init(struct intel_encoder *encoder) @@ -1588,7 +1602,9 @@ void 
intel_ddi_buf_trans_init(struct intel_encoder *encoder) struct drm_i915_private *i915 = to_i915(encoder->base.dev); enum phy phy = intel_port_to_phy(i915, encoder->port); - if (IS_ALDERLAKE_P(i915)) { + if (IS_DG2(i915)) { + encoder->get_buf_trans = dg2_get_snps_buf_trans; + } else if (IS_ALDERLAKE_P(i915)) { if (intel_phy_is_combo(i915, phy)) encoder->get_buf_trans = adlp_get_combo_buf_trans; else diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h index 2acd720f9d4f..2133984a572b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h @@ -34,15 +34,21 @@ struct icl_ddi_buf_trans { }; struct icl_mg_phy_ddi_buf_trans { - u32 cri_txdeemph_override_11_6; - u32 cri_txdeemph_override_5_0; - u32 cri_txdeemph_override_17_12; + u8 cri_txdeemph_override_11_6; + u8 cri_txdeemph_override_5_0; + u8 cri_txdeemph_override_17_12; }; struct tgl_dkl_phy_ddi_buf_trans { - u32 dkl_vswing_control; - u32 dkl_preshoot_control; - u32 dkl_de_emphasis_control; + u8 vswing; + u8 preshoot; + u8 de_emphasis; +}; + +struct dg2_snps_phy_buf_trans { + u8 vswing; + u8 pre_cursor; + u8 post_cursor; }; union intel_ddi_buf_trans_entry { @@ -51,6 +57,7 @@ union intel_ddi_buf_trans_entry { struct icl_ddi_buf_trans icl; struct icl_mg_phy_ddi_buf_trans mg; struct tgl_dkl_phy_ddi_buf_trans dkl; + struct dg2_snps_phy_buf_trans snps; }; struct intel_ddi_buf_trans { @@ -61,10 +68,6 @@ struct intel_ddi_buf_trans { bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table); -int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - int *default_entry); - void intel_ddi_buf_trans_init(struct intel_encoder *encoder); #endif diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 134a6acbd8fb..6fbad5c6cc71 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <linux/dma-resv.h> #include <linux/slab.h> +#include <linux/vga_switcheroo.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -41,6 +42,7 @@ #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_privacy_screen_consumer.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> @@ -52,6 +54,7 @@ #include "display/intel_dp_mst.h" #include "display/intel_dpll.h" #include "display/intel_dpll_mgr.h" +#include "display/intel_drrs.h" #include "display/intel_dsi.h" #include "display/intel_dvo.h" #include "display/intel_fb.h" @@ -67,12 +70,12 @@ #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_object.h" -#include "gt/intel_rps.h" #include "gt/gen8_ppgtt.h" #include "g4x_dp.h" #include "g4x_hdmi.h" #include "i915_drv.h" +#include "icl_dsi.h" #include "intel_acpi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" @@ -84,35 +87,35 @@ #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp_link_training.h" +#include "intel_dpt.h" #include "intel_fbc.h" -#include "intel_fdi.h" #include "intel_fbdev.h" +#include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_frontbuffer.h" #include "intel_hdcp.h" #include "intel_hotplug.h" #include "intel_overlay.h" +#include "intel_panel.h" +#include "intel_pch_display.h" +#include "intel_pch_refclk.h" +#include "intel_pcode.h" #include "intel_pipe_crc.h" +#include "intel_plane_initial.h" 
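/*
 * Illustrative sketch, not part of this patch: the ->get_buf_trans()
 * dispatch pattern used by the intel_ddi_buf_trans changes above.
 * Types, table names and entry counts below are simplified stand-ins;
 * only the 270000 kHz (HBR2+) port-clock threshold mirrors the real
 * *_get_combo_buf_trans_dp() helpers.
 */
#include <stdio.h>

struct buf_trans_table { const char *name; int n_entries; };

static const struct buf_trans_table trans_dp_hbr = { "dp_hbr", 9 };
static const struct buf_trans_table trans_dp_hbr2_hbr3 = { "dp_hbr2_hbr3", 10 };

static const struct buf_trans_table *
get_dp_buf_trans(int port_clock_khz, int *n_entries)
{
	const struct buf_trans_table *t =
		port_clock_khz > 270000 ? &trans_dp_hbr2_hbr3 : &trans_dp_hbr;

	*n_entries = t->n_entries;
	return t;
}

int main(void)
{
	int n;
	const struct buf_trans_table *t = get_dp_buf_trans(540000, &n);

	printf("%s: %d entries\n", t->name, n); /* dp_hbr2_hbr3: 10 entries */
	return 0;
}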
#include "intel_pm.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_quirks.h" -#include "intel_sideband.h" #include "intel_sprite.h" #include "intel_tc.h" #include "intel_vga.h" #include "i9xx_plane.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "vlv_dsi_pll.h" +#include "vlv_sideband.h" +#include "vlv_dsi.h" -static void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -static void ilk_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); - -static int intel_framebuffer_init(struct intel_framebuffer *ifb, - struct drm_i915_gem_object *obj, - struct drm_mode_fb_cmd2 *mode_cmd); static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, @@ -120,186 +123,105 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta const struct intel_link_m_n *m2_n2); static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); -static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state); +static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); static void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); -struct i915_dpt { - struct i915_address_space vm; - - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - void __iomem *iomem; -}; - -#define i915_is_dpt(vm) ((vm)->is_dpt) - -static inline struct i915_dpt * -i915_vm_to_dpt(struct i915_address_space *vm) -{ - BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); - GEM_BUG_ON(!i915_is_dpt(vm)); - return container_of(vm, struct i915_dpt, vm); -} - -#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) - -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) -{ - writeq(pte, addr); -} - -static void dpt_insert_page(struct i915_address_space *vm, - dma_addr_t addr, - u64 offset, - enum i915_cache_level level, - u32 flags) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - gen8_pte_t __iomem *base = dpt->iomem; - - gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, - vm->pte_encode(addr, level, flags)); -} - -static void dpt_insert_entries(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level level, - u32 flags) +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * @dev_priv: i915 device + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). 
+ * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. + */ +static void intel_update_watermarks(struct drm_i915_private *dev_priv) { - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - gen8_pte_t __iomem *base = dpt->iomem; - const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags); - struct sgt_iter sgt_iter; - dma_addr_t addr; - int i; - - /* - * Note that we ignore PTE_READ_ONLY here. The caller must be careful - * not to allow the user to override access to a read only page. - */ - - i = vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_daddr(addr, sgt_iter, vma->pages) - gen8_set_pte(&base[i++], pte_encode | addr); + if (dev_priv->wm_disp->update_wm) + dev_priv->wm_disp->update_wm(dev_priv); } -static void dpt_clear_range(struct i915_address_space *vm, - u64 start, u64 length) +static int intel_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + if (dev_priv->wm_disp->compute_pipe_wm) + return dev_priv->wm_disp->compute_pipe_wm(state, crtc); + return 0; } -static void dpt_bind_vma(struct i915_address_space *vm, - struct i915_vm_pt_stash *stash, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +static int intel_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_i915_gem_object *obj = vma->obj; - u32 pte_flags; - - /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ - pte_flags = 0; - if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj)) - pte_flags |= PTE_READ_ONLY; - if (i915_gem_object_is_lmem(obj)) - pte_flags |= PTE_LM; - - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); - - vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; - - /* - * Without aliasing PPGTT there's no difference between - * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally - * upgrade to both bound if we bind either to avoid double-binding. 
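/*
 * Illustrative arithmetic, not part of this patch, for the normal
 * watermark formula in the intel_update_watermarks() comment above
 * (watermark = dotclock * bytes per pixel * latency). All inputs are
 * assumed example values: a 148500 kHz dot clock at 4 bytes per pixel
 * with 2 us of memory latency drains ~1188 bytes of FIFO, i.e. 19
 * 64-byte entries, plus the 2 extra entries for clock crossings.
 */
#include <stdio.h>

static unsigned int wm_fifo_entries(unsigned long long dotclock_khz,
				    unsigned int cpp,
				    unsigned int latency_ns)
{
	/* pixels/s * bytes/pixel * seconds of latency */
	unsigned long long bytes = dotclock_khz * cpp * latency_ns / 1000000;

	/* round up to 64-byte FIFO entries, +2 for clock crossings */
	return (unsigned int)((bytes + 63) / 64) + 2;
}

int main(void)
{
	printf("%u FIFO entries\n", wm_fifo_entries(148500, 4, 2000)); /* 21 */
	return 0;
}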
- */ - atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + if (!dev_priv->wm_disp->compute_intermediate_wm) + return 0; + if (drm_WARN_ON(&dev_priv->drm, + !dev_priv->wm_disp->compute_pipe_wm)) + return 0; + return dev_priv->wm_disp->compute_intermediate_wm(state, crtc); } -static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) +static bool intel_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - vm->clear_range(vm, vma->node.start, vma->size); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + if (dev_priv->wm_disp->initial_watermarks) { + dev_priv->wm_disp->initial_watermarks(state, crtc); + return true; + } + return false; } -static void dpt_cleanup(struct i915_address_space *vm) +static void intel_atomic_update_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_gem_object_put(dpt->obj); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + if (dev_priv->wm_disp->atomic_update_watermarks) + dev_priv->wm_disp->atomic_update_watermarks(state, crtc); } -static struct i915_address_space * -intel_dpt_create(struct intel_framebuffer *fb) +static void intel_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; - struct drm_i915_private *i915 = to_i915(obj->dev); - struct drm_i915_gem_object *dpt_obj; - struct i915_address_space *vm; - struct i915_dpt *dpt; - size_t size; - int ret; - - if (intel_fb_needs_pot_stride_remap(fb)) - size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); - else - size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); - - size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); - - if (HAS_LMEM(i915)) - dpt_obj = i915_gem_object_create_lmem(i915, size, 0); - else - dpt_obj = i915_gem_object_create_stolen(i915, size); - if (IS_ERR(dpt_obj)) - return ERR_CAST(dpt_obj); - - ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); - if (ret) { - i915_gem_object_put(dpt_obj); - return ERR_PTR(ret); - } - - dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); - if (!dpt) { - i915_gem_object_put(dpt_obj); - return ERR_PTR(-ENOMEM); - } - - vm = &dpt->vm; - - vm->gt = &i915->gt; - vm->i915 = i915; - vm->dma = i915->drm.dev; - vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; - vm->is_dpt = true; - - i915_address_space_init(vm, VM_CLASS_DPT); - - vm->insert_page = dpt_insert_page; - vm->clear_range = dpt_clear_range; - vm->insert_entries = dpt_insert_entries; - vm->cleanup = dpt_cleanup; - - vm->vma_ops.bind_vma = dpt_bind_vma; - vm->vma_ops.unbind_vma = dpt_unbind_vma; - vm->vma_ops.set_pages = ggtt_set_pages; - vm->vma_ops.clear_pages = clear_pages; - - vm->pte_encode = gen8_ggtt_pte_encode; - - dpt->obj = dpt_obj; - - return &dpt->vm; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + if (dev_priv->wm_disp->optimize_watermarks) + dev_priv->wm_disp->optimize_watermarks(state, crtc); } -static void intel_dpt_destroy(struct i915_address_space *vm) +static int intel_compute_global_watermarks(struct intel_atomic_state *state) { - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_vm_close(&dpt->vm); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + if (dev_priv->wm_disp->compute_global_watermarks) + return dev_priv->wm_disp->compute_global_watermarks(state); + return 0; } /* returns HPLL 
frequency in kHz */ @@ -359,6 +281,12 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv) dev_priv->czclk_freq); } +static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) +{ + return (crtc_state->active_planes & + ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; +} + /* WA Display #0827: Gen9:all */ static void skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) @@ -384,6 +312,15 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); } +/* Wa_1604331009:icl,jsl,ehl */ +static void +icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, + bool enable) +{ + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS, + enable ? CURSOR_GATING_DIS : 0); +} + static bool is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) { @@ -403,6 +340,14 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) is_trans_port_sync_slave(crtc_state); } +static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->bigjoiner_slave) + return crtc_state->bigjoiner_linked_crtc; + else + return to_intel_crtc(crtc_state->uapi.crtc); +} + static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -464,168 +409,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) } } -/* Only for pre-ILK configs */ -void assert_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) -{ - u32 val; - bool cur_state; - - val = intel_de_read(dev_priv, DPLL(pipe)); - cur_state = !!(val & DPLL_VCO_ENABLE); - I915_STATE_WARN(cur_state != state, - "PLL state assertion failure (expected %s, current %s)\n", - onoff(state), onoff(cur_state)); -} - -/* XXX: the dsi pll is shared between MIPI DSI ports */ -void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) -{ - u32 val; - bool cur_state; - - vlv_cck_get(dev_priv); - val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); - vlv_cck_put(dev_priv); - - cur_state = val & DSI_PLL_VCO_EN; - I915_STATE_WARN(cur_state != state, - "DSI PLL state assertion failure (expected %s, current %s)\n", - onoff(state), onoff(cur_state)); -} - -static void assert_fdi_tx(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) -{ - bool cur_state; - - if (HAS_DDI(dev_priv)) { - /* - * DDI does not have a specific FDI_TX register. - * - * FDI is never fed from EDP transcoder - * so pipe->transcoder cast is fine here. 
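/*
 * Illustrative sketch, not part of this patch, of the state-assert
 * pattern shared by the assert_*() helpers above: read a register,
 * reduce it to an on/off state via its enable bit, and warn when it
 * does not match the expected state. The register and bit here are
 * stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENABLE_BIT (1u << 31)

static unsigned int read_reg(void)
{
	return ENABLE_BIT; /* pretend the hardware reports "enabled" */
}

static void assert_state(bool state)
{
	bool cur_state = !!(read_reg() & ENABLE_BIT);

	if (cur_state != state)
		fprintf(stderr,
			"state assertion failure (expected %s, current %s)\n",
			state ? "on" : "off", cur_state ? "on" : "off");
}

int main(void)
{
	assert_state(true); /* matches, so no warning is printed */
	return 0;
}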
- */ - enum transcoder cpu_transcoder = (enum transcoder)pipe; - u32 val = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(cpu_transcoder)); - cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); - } else { - u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); - cur_state = !!(val & FDI_TX_ENABLE); - } - I915_STATE_WARN(cur_state != state, - "FDI TX state assertion failure (expected %s, current %s)\n", - onoff(state), onoff(cur_state)); -} -#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) -#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) - -static void assert_fdi_rx(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) -{ - u32 val; - bool cur_state; - - val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); - cur_state = !!(val & FDI_RX_ENABLE); - I915_STATE_WARN(cur_state != state, - "FDI RX state assertion failure (expected %s, current %s)\n", - onoff(state), onoff(cur_state)); -} -#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) -#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) - -static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - u32 val; - - /* ILK FDI PLL is always enabled */ - if (IS_IRONLAKE(dev_priv)) - return; - - /* On Haswell, DDI ports are responsible for the FDI PLL setup */ - if (HAS_DDI(dev_priv)) - return; - - val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); - I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); -} - -void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) -{ - u32 val; - bool cur_state; - - val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); - cur_state = !!(val & FDI_RX_PLL_ENABLE); - I915_STATE_WARN(cur_state != state, - "FDI RX PLL assertion failure (expected %s, current %s)\n", - onoff(state), onoff(cur_state)); -} - -void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - i915_reg_t pp_reg; - u32 val; - enum pipe panel_pipe = INVALID_PIPE; - bool locked = true; - - if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv))) - return; - - if (HAS_PCH_SPLIT(dev_priv)) { - u32 port_sel; - - pp_reg = PP_CONTROL(0); - port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; - - switch (port_sel) { - case PANEL_PORT_SELECT_LVDS: - intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe); - break; - case PANEL_PORT_SELECT_DPA: - g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe); - break; - case PANEL_PORT_SELECT_DPC: - g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe); - break; - case PANEL_PORT_SELECT_DPD: - g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe); - break; - default: - MISSING_CASE(port_sel); - break; - } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - /* presumably write lock depends on pipe, not port select */ - pp_reg = PP_CONTROL(pipe); - panel_pipe = pipe; - } else { - u32 port_sel; - - pp_reg = PP_CONTROL(0); - port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; - - drm_WARN_ON(&dev_priv->drm, - port_sel != PANEL_PORT_SELECT_LVDS); - intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); - } - - val = intel_de_read(dev_priv, pp_reg); - if (!(val & PANEL_POWER_ON) || - ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) - locked = false; - - I915_STATE_WARN(panel_pipe == pipe && locked, - "panel assertion failure, pipe %c regs locked\n", - pipe_name(pipe)); -} - -void assert_pipe(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder, 
bool state) +void assert_transcoder(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, bool state) { bool cur_state; enum intel_display_power_domain power_domain; @@ -676,80 +461,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc) assert_plane_disabled(plane); } -void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - u32 val; - bool enabled; - - val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); - enabled = !!(val & TRANS_ENABLE); - I915_STATE_WARN(enabled, - "transcoder assertion failed, should be off on pipe %c but is still active\n", - pipe_name(pipe)); -} - -static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, - i915_reg_t dp_reg) -{ - enum pipe port_pipe; - bool state; - - state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); - - I915_STATE_WARN(state && port_pipe == pipe, - "PCH DP %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); - - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH DP %c still using transcoder B\n", - port_name(port)); -} - -static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, - i915_reg_t hdmi_reg) -{ - enum pipe port_pipe; - bool state; - - state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); - - I915_STATE_WARN(state && port_pipe == pipe, - "PCH HDMI %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); - - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH HDMI %c still using transcoder B\n", - port_name(port)); -} - -static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - enum pipe port_pipe; - - assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); - assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); - assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); - - I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && - port_pipe == pipe, - "PCH VGA enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); - - I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && - port_pipe == pipe, - "PCH LVDS enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); - - /* PCH SDVOB multiplex with HDMIB */ - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); -} - void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dig_port, unsigned int expected_mask) @@ -784,154 +495,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, expected_mask); } -static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - i915_reg_t reg; - u32 val, pipeconf_val; - - /* Make sure PCH DPLL is enabled */ - assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, pipe); - assert_fdi_rx_enabled(dev_priv, pipe); - - if (HAS_PCH_CPT(dev_priv)) { - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - /* - * Workaround: Set the timing override bit - * before enabling the pch transcoder. 
- */ - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - intel_de_write(dev_priv, reg, val); - } - - reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); - - if (HAS_PCH_IBX(dev_priv)) { - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_FRAME_START_DELAY_MASK; - val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - - /* - * Make the BPC in transcoder be consistent with - * that in pipeconf reg. For HDMI we must use 8bpc - * here for both 8bpc and 12bpc. - */ - val &= ~PIPECONF_BPC_MASK; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - val |= PIPECONF_8BPC; - else - val |= pipeconf_val & PIPECONF_BPC_MASK; - } - - val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { - if (HAS_PCH_IBX(dev_priv) && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - val |= TRANS_LEGACY_INTERLACED_ILK; - else - val |= TRANS_INTERLACED; - } else { - val |= TRANS_PROGRESSIVE; - } - - intel_de_write(dev_priv, reg, val | TRANS_ENABLE); - if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) - drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", - pipe_name(pipe)); -} - -static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - u32 val, pipeconf_val; - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); - assert_fdi_rx_enabled(dev_priv, PIPE_A); - - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); - /* Workaround: set timing override bit. */ - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); - - val = TRANS_ENABLE; - pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == - PIPECONF_INTERLACED_ILK) - val |= TRANS_INTERLACED; - else - val |= TRANS_PROGRESSIVE; - - intel_de_write(dev_priv, LPT_TRANSCONF, val); - if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 100)) - drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); -} - -static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - i915_reg_t reg; - u32 val; - - /* FDI relies on the transcoder */ - assert_fdi_tx_disabled(dev_priv, pipe); - assert_fdi_rx_disabled(dev_priv, pipe); - - /* Ports must be off as well */ - assert_pch_ports_disabled(dev_priv, pipe); - - reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, reg, val); - /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", - pipe_name(pipe)); - - if (HAS_PCH_CPT(dev_priv)) { - /* Workaround: Clear the timing override chicken bit again. 
*/ - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, reg, val); - } -} - -void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = intel_de_read(dev_priv, LPT_TRANSCONF); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, LPT_TRANSCONF, val); - /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); - - /* Workaround: clear timing override bit. */ - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); -} - enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -942,7 +505,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) return crtc->pipe; } -void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) +void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1003,7 +566,7 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) intel_wait_for_pipe_scanline_moving(crtc); } -void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) +void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1045,77 +608,6 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) intel_wait_for_pipe_off(old_crtc_state); } -bool -intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, - u64 modifier) -{ - return info->is_yuv && - info->num_planes == (is_ccs_modifier(modifier) ? 
4 : 2); -} - -unsigned int -intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) -{ - struct drm_i915_private *dev_priv = to_i915(fb->dev); - unsigned int cpp = fb->format->cpp[color_plane]; - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - return intel_tile_size(dev_priv); - case I915_FORMAT_MOD_X_TILED: - if (DISPLAY_VER(dev_priv) == 2) - return 128; - else - return 512; - case I915_FORMAT_MOD_Y_TILED_CCS: - if (is_ccs_plane(fb, color_plane)) - return 128; - fallthrough; - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (is_ccs_plane(fb, color_plane)) - return 64; - fallthrough; - case I915_FORMAT_MOD_Y_TILED: - if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv)) - return 128; - else - return 512; - case I915_FORMAT_MOD_Yf_TILED_CCS: - if (is_ccs_plane(fb, color_plane)) - return 128; - fallthrough; - case I915_FORMAT_MOD_Yf_TILED: - switch (cpp) { - case 1: - return 64; - case 2: - case 4: - return 128; - case 8: - case 16: - return 256; - default: - MISSING_CASE(cpp); - return cpp; - } - break; - default: - MISSING_CASE(fb->modifier); - return cpp; - } -} - -unsigned int -intel_fb_align_height(const struct drm_framebuffer *fb, - int color_plane, unsigned int height) -{ - unsigned int tile_height = intel_tile_height(fb, color_plane); - - return ALIGN(height, tile_height); -} - unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) { unsigned int size = 0; @@ -1132,283 +624,36 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info unsigned int size = 0; int i; - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) - size += rem_info->plane[i].dst_stride * rem_info->plane[i].height; - - return size; -} - -static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 9) - return 256 * 1024; - else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return 128 * 1024; - else if (DISPLAY_VER(dev_priv) >= 4) - return 4 * 1024; - else - return 0; -} - -static bool has_async_flips(struct drm_i915_private *i915) -{ - return DISPLAY_VER(i915) >= 5; -} - -unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, - int color_plane) -{ - struct drm_i915_private *dev_priv = to_i915(fb->dev); - - if (intel_fb_uses_dpt(fb)) - return 512 * 4096; + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { + unsigned int plane_size; - /* AUX_DIST needs only 4K alignment */ - if (is_ccs_plane(fb, color_plane)) - return 4096; + if (rem_info->plane[i].linear) + plane_size = rem_info->plane[i].size; + else + plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; - if (is_semiplanar_uv_plane(fb, color_plane)) { - /* - * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes - * alignment for linear UV planes on all platforms. 
- */ - if (DISPLAY_VER(dev_priv) >= 12) { - if (fb->modifier == DRM_FORMAT_MOD_LINEAR) - return intel_linear_alignment(dev_priv); + if (plane_size == 0) + continue; - return intel_tile_row_size(fb, color_plane); - } + if (rem_info->plane_alignment) + size = ALIGN(size, rem_info->plane_alignment); - return 4096; + size += plane_size; } - drm_WARN_ON(&dev_priv->drm, color_plane != 0); - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - return intel_linear_alignment(dev_priv); - case I915_FORMAT_MOD_X_TILED: - if (has_async_flips(dev_priv)) - return 256 * 1024; - return 0; - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - return 16 * 1024; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - return 1 * 1024 * 1024; - default: - MISSING_CASE(fb->modifier); - return 0; - } + return size; } -static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) +bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); return DISPLAY_VER(dev_priv) < 4 || - (plane->has_fbc && + (plane->fbc && plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL); } -static struct i915_vma * -intel_pin_fb_obj_dpt(struct drm_framebuffer *fb, - const struct i915_ggtt_view *view, - bool uses_fence, - unsigned long *out_flags, - struct i915_address_space *vm) -{ - struct drm_device *dev = fb->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct i915_vma *vma; - u32 alignment; - int ret; - - if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) - return ERR_PTR(-EINVAL); - - alignment = 4096 * 512; - - atomic_inc(&dev_priv->gpu_error.pending_fb_pin); - - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); - if (ret) { - vma = ERR_PTR(ret); - goto err; - } - - vma = i915_vma_instance(obj, vm, view); - if (IS_ERR(vma)) - goto err; - - if (i915_vma_misplaced(vma, 0, alignment, 0)) { - ret = i915_vma_unbind(vma); - if (ret) { - vma = ERR_PTR(ret); - goto err; - } - } - - ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL); - if (ret) { - vma = ERR_PTR(ret); - goto err; - } - - vma->display_alignment = max_t(u64, vma->display_alignment, alignment); - - i915_gem_object_flush_if_display(obj); - - i915_vma_get(vma); -err: - atomic_dec(&dev_priv->gpu_error.pending_fb_pin); - - return vma; -} - -struct i915_vma * -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - bool phys_cursor, - const struct i915_ggtt_view *view, - bool uses_fence, - unsigned long *out_flags) -{ - struct drm_device *dev = fb->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - intel_wakeref_t wakeref; - struct i915_gem_ww_ctx ww; - struct i915_vma *vma; - unsigned int pinctl; - u32 alignment; - int ret; - - if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj))) - return ERR_PTR(-EINVAL); - - if (phys_cursor) - alignment = intel_cursor_alignment(dev_priv); - else - alignment = intel_surf_alignment(fb, 0); - if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment))) - return ERR_PTR(-EINVAL); - - /* Note that the w/a also requires 64 PTE of padding following the - * bo. 
We currently fill all unused PTE with the shadow page and so - * we should always have valid PTE following the scanout preventing - * the VT-d warning. - */ - if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) - alignment = 256 * 1024; - - /* - * Global gtt pte registers are special registers which actually forward - * writes to a chunk of system memory. Which means that there is no risk - * that the register values disappear as soon as we call - * intel_runtime_pm_put(), so it is correct to wrap only the - * pin/unpin/fence and not more. - */ - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - atomic_inc(&dev_priv->gpu_error.pending_fb_pin); - - /* - * Valleyview is definitely limited to scanning out the first - * 512MiB. Lets presume this behaviour was inherited from the - * g4x display engine and that all earlier gen are similarly - * limited. Testing suggests that it is a little more - * complicated than this. For example, Cherryview appears quite - * happy to scanout from anywhere within its global aperture. - */ - pinctl = 0; - if (HAS_GMCH(dev_priv)) - pinctl |= PIN_MAPPABLE; - - i915_gem_ww_ctx_init(&ww, true); -retry: - ret = i915_gem_object_lock(obj, &ww); - if (!ret && phys_cursor) - ret = i915_gem_object_attach_phys(obj, alignment); - else if (!ret && HAS_LMEM(dev_priv)) - ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM); - /* TODO: Do we need to sync when migration becomes async? */ - if (!ret) - ret = i915_gem_object_pin_pages(obj); - if (ret) - goto err; - - if (!ret) { - vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, - view, pinctl); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_unpin; - } - } - - if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { - /* - * Install a fence for tiled scan-out. Pre-i965 always needs a - * fence, whereas 965+ only requires a fence if using - * framebuffer compression. For simplicity, we always, when - * possible, install a fence as the cost is not that onerous. - * - * If we fail to fence the tiled scanout, then either the - * modeset will reject the change (which is highly unlikely as - * the affected systems, all but one, do not have unmappable - * space) or we will not be able to enable full powersaving - * techniques (also likely not to apply due to various limits - * FBC and the like impose on the size of the buffer, which - * presumably we violated anyway with this unmappable buffer). - * Anyway, it is presumably better to stumble onwards with - * something and try to run the system in a "less than optimal" - * mode that matches the user configuration. - */ - ret = i915_vma_pin_fence(vma); - if (ret != 0 && DISPLAY_VER(dev_priv) < 4) { - i915_vma_unpin(vma); - goto err_unpin; - } - ret = 0; - - if (vma->fence) - *out_flags |= PLANE_HAS_FENCE; - } - - i915_vma_get(vma); - -err_unpin: - i915_gem_object_unpin_pages(obj); -err: - if (ret == -EDEADLK) { - ret = i915_gem_ww_ctx_backoff(&ww); - if (!ret) - goto retry; - } - i915_gem_ww_ctx_fini(&ww); - if (ret) - vma = ERR_PTR(ret); - - atomic_dec(&dev_priv->gpu_error.pending_fb_pin); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - return vma; -} - -void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) -{ - if (flags & PLANE_HAS_FENCE) - i915_vma_unpin_fence(vma); - i915_vma_unpin(vma); - i915_vma_put(vma); -} - /* * Convert the x/y offsets into a linear offset. 
* Only valid with 0/180 degree rotation, which is fine since linear @@ -1421,7 +666,7 @@ u32 intel_fb_xy_to_linear(int x, int y, { const struct drm_framebuffer *fb = state->hw.fb; unsigned int cpp = fb->format->cpp[color_plane]; - unsigned int pitch = state->view.color_plane[color_plane].stride; + unsigned int pitch = state->view.color_plane[color_plane].mapping_stride; return y * pitch + x * cpp; } @@ -1440,158 +685,6 @@ void intel_add_fb_offsets(int *x, int *y, *y += state->view.color_plane[color_plane].y; } -static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) -{ - switch (fb_modifier) { - case I915_FORMAT_MOD_X_TILED: - return I915_TILING_X; - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - return I915_TILING_Y; - default: - return I915_TILING_NONE; - } -} - -/* - * From the Sky Lake PRM: - * "The Color Control Surface (CCS) contains the compression status of - * the cache-line pairs. The compression state of the cache-line pair - * is specified by 2 bits in the CCS. Each CCS cache-line represents - * an area on the main surface of 16 x16 sets of 128 byte Y-tiled - * cache-line-pairs. CCS is always Y tiled." - * - * Since cache line pairs refers to horizontally adjacent cache lines, - * each cache line in the CCS corresponds to an area of 32x16 cache - * lines on the main surface. Since each pixel is 4 bytes, this gives - * us a ratio of one byte in the CCS for each 8x16 pixels in the - * main surface. - */ -static const struct drm_format_info skl_ccs_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, -}; - -/* - * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the - * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles - * in the main surface. With 4 byte pixels and each Y-tile having dimensions of - * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in - * the main surface. 
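/*
 * Illustrative arithmetic, not part of this patch, for the CCS ratios
 * documented above: on skl one CCS byte covers 8x16 pixels at 4 bytes
 * per pixel (8 * 16 * 4 = 512 main-surface bytes), while on gen12 one
 * CCS byte covers 2x32 pixels (2 * 32 * 4 = 256 main-surface bytes).
 */
#include <stdio.h>

static unsigned long ccs_bytes(unsigned long main_bytes, unsigned int ratio)
{
	return (main_bytes + ratio - 1) / ratio; /* round up */
}

int main(void)
{
	unsigned long main_bytes = 3840UL * 2160 * 4; /* 4K XRGB8888 plane */

	printf("skl   CCS size: %lu bytes\n", ccs_bytes(main_bytes, 512));
	printf("gen12 CCS size: %lu bytes\n", ccs_bytes(main_bytes, 256));
	return 0;
}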
- */ -static const struct drm_format_info gen12_ccs_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_YUYV, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_YVYU, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_UYVY, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_VYUY, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_NV12, .num_planes = 4, - .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P010, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P012, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P016, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, -}; - -/* - * Same as gen12_ccs_formats[] above, but with additional surface used - * to pass Clear Color information in plane 2 with 64 bits of data. 
- */ -static const struct drm_format_info gen12_ccs_cc_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, -}; - -static const struct drm_format_info * -lookup_format_info(const struct drm_format_info formats[], - int num_formats, u32 format) -{ - int i; - - for (i = 0; i < num_formats; i++) { - if (formats[i].format == format) - return &formats[i]; - } - - return NULL; -} - -static const struct drm_format_info * -intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) -{ - switch (cmd->modifier[0]) { - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - return lookup_format_info(skl_ccs_formats, - ARRAY_SIZE(skl_ccs_formats), - cmd->pixel_format); - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - return lookup_format_info(gen12_ccs_formats, - ARRAY_SIZE(gen12_ccs_formats), - cmd->pixel_format); - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - return lookup_format_info(gen12_ccs_cc_formats, - ARRAY_SIZE(gen12_ccs_cc_formats), - cmd->pixel_format); - default: - return NULL; - } -} - -static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane) -{ - return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)], - 512) * 64; -} - u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier) { @@ -1606,7 +699,7 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, * the highest stride limits of them all, * if in case pipe A is disabled, use the first pipe from pipe_mask. */ - crtc = intel_get_first_crtc(dev_priv); + crtc = intel_first_crtc(dev_priv); if (!crtc) return 0; @@ -1616,188 +709,6 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0); } -static -u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, - u32 pixel_format, u64 modifier) -{ - /* - * Arbitrary limit for gen4+ chosen to match the - * render engine max stride. - * - * The new CCS hash mode makes remapping impossible - */ - if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) || - intel_modifier_uses_dpt(dev_priv, modifier)) - return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); - else if (DISPLAY_VER(dev_priv) >= 7) - return 256 * 1024; - else - return 128 * 1024; -} - -static u32 -intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) -{ - struct drm_i915_private *dev_priv = to_i915(fb->dev); - u32 tile_width; - - if (is_surface_linear(fb, color_plane)) { - u32 max_stride = intel_plane_fb_max_stride(dev_priv, - fb->format->format, - fb->modifier); - - /* - * To make remapping with linear generally feasible - * we need the stride to be page aligned. 
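/*
 * Worked example, not part of this patch, for gen12_ccs_aux_stride()
 * above: the AUX (CCS) pitch is derived from the main pitch in
 * 512-byte units of 64 CCS bytes each. A 3840-pixel-wide XRGB8888
 * surface has a 15360-byte main pitch, so the AUX stride is
 * DIV_ROUND_UP(15360, 512) * 64 = 30 * 64 = 1920 bytes.
 */
#include <stdio.h>

static int example_gen12_aux_stride(int main_pitch)
{
	return (main_pitch + 511) / 512 * 64; /* open-coded DIV_ROUND_UP */
}

int main(void)
{
	printf("%d\n", example_gen12_aux_stride(15360)); /* 1920 */
	return 0;
}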
- */ - if (fb->pitches[color_plane] > max_stride && - !is_ccs_modifier(fb->modifier)) - return intel_tile_size(dev_priv); - else - return 64; - } - - tile_width = intel_tile_width_bytes(fb, color_plane); - if (is_ccs_modifier(fb->modifier)) { - /* - * Display WA #0531: skl,bxt,kbl,glk - * - * Render decompression and plane width > 3840 - * combined with horizontal panning requires the - * plane stride to be a multiple of 4. We'll just - * require the entire fb to accommodate that to avoid - * potential runtime errors at plane configuration time. - */ - if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) && - color_plane == 0 && fb->width > 3840) - tile_width *= 4; - /* - * The main surface pitch must be padded to a multiple of four - * tile widths. - */ - else if (DISPLAY_VER(dev_priv) >= 12) - tile_width *= 4; - } - return tile_width; -} - -static struct i915_vma * -initial_plane_vma(struct drm_i915_private *i915, - struct intel_initial_plane_config *plane_config) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - u32 base, size; - - if (plane_config->size == 0) - return NULL; - - base = round_down(plane_config->base, - I915_GTT_MIN_ALIGNMENT); - size = round_up(plane_config->base + plane_config->size, - I915_GTT_MIN_ALIGNMENT); - size -= base; - - /* - * If the FB is too big, just don't use it since fbdev is not very - * important and we should probably use that space with FBC or other - * features. - */ - if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) && - size * 2 > i915->stolen_usable_size) - return NULL; - - obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size); - if (IS_ERR(obj)) - return NULL; - - /* - * Mark it WT ahead of time to avoid changing the - * cache_level during fbdev initialization. The - * unbind there would get stuck waiting for rcu. - */ - i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ? 
- I915_CACHE_WT : I915_CACHE_NONE); - - switch (plane_config->tiling) { - case I915_TILING_NONE: - break; - case I915_TILING_X: - case I915_TILING_Y: - obj->tiling_and_stride = - plane_config->fb->base.pitches[0] | - plane_config->tiling; - break; - default: - MISSING_CASE(plane_config->tiling); - goto err_obj; - } - - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) - goto err_obj; - - if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base)) - goto err_obj; - - if (i915_gem_object_is_tiled(obj) && - !i915_vma_is_map_and_fenceable(vma)) - goto err_obj; - - return vma; - -err_obj: - i915_gem_object_put(obj); - return NULL; -} - -static bool -intel_alloc_initial_plane_obj(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - struct drm_framebuffer *fb = &plane_config->fb->base; - struct i915_vma *vma; - - switch (fb->modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - break; - default: - drm_dbg(&dev_priv->drm, - "Unsupported modifier for initial FB: 0x%llx\n", - fb->modifier); - return false; - } - - vma = initial_plane_vma(dev_priv, plane_config); - if (!vma) - return false; - - mode_cmd.pixel_format = fb->format->format; - mode_cmd.width = fb->width; - mode_cmd.height = fb->height; - mode_cmd.pitches[0] = fb->pitches[0]; - mode_cmd.modifier[0] = fb->modifier; - mode_cmd.flags = DRM_MODE_FB_MODIFIERS; - - if (intel_framebuffer_init(to_intel_framebuffer(fb), - vma->obj, &mode_cmd)) { - drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); - goto err_vma; - } - - plane_config->vma = vma; - return true; - -err_vma: - i915_vma_put(vma); - return false; -} - static void intel_set_plane_visible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state, @@ -1833,8 +744,8 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state) } } -static void intel_plane_disable_noatomic(struct intel_crtc *crtc, - struct intel_plane *plane) +void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct intel_plane *plane) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = @@ -1866,7 +777,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, */ if (HAS_GMCH(dev_priv) && intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * Gen2 reports pipe underruns whenever all planes are disabled. @@ -1875,170 +786,8 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - intel_disable_plane(plane, crtc_state); - intel_wait_for_vblank(dev_priv, crtc->pipe); -} - -static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) -{ - struct drm_i915_private *i915 = vm->i915; - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - intel_wakeref_t wakeref; - struct i915_vma *vma; - void __iomem *iomem; - - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - atomic_inc(&i915->gpu_error.pending_fb_pin); - - vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096, - HAS_LMEM(i915) ? 
0 : PIN_MAPPABLE); - if (IS_ERR(vma)) - goto err; - - iomem = i915_vma_pin_iomap(vma); - i915_vma_unpin(vma); - if (IS_ERR(iomem)) { - vma = iomem; - goto err; - } - - dpt->vma = vma; - dpt->iomem = iomem; - - i915_vma_get(vma); - -err: - atomic_dec(&i915->gpu_error.pending_fb_pin); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - - return vma; -} - -static void intel_dpt_unpin(struct i915_address_space *vm) -{ - struct i915_dpt *dpt = i915_vm_to_dpt(vm); - - i915_vma_unpin_iomap(dpt->vma); - i915_vma_put(dpt->vma); -} - -static bool -intel_reuse_initial_plane_obj(struct drm_i915_private *i915, - const struct intel_initial_plane_config *plane_config, - struct drm_framebuffer **fb, - struct i915_vma **vma) -{ - struct intel_crtc *crtc; - - for_each_intel_crtc(&i915->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane *plane = - to_intel_plane(crtc->base.primary); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - - if (!crtc_state->uapi.active) - continue; - - if (!plane_state->ggtt_vma) - continue; - - if (intel_plane_ggtt_offset(plane_state) == plane_config->base) { - *fb = plane_state->hw.fb; - *vma = plane_state->ggtt_vma; - return true; - } - } - - return false; -} - -static void -intel_find_initial_plane_obj(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane *plane = - to_intel_plane(crtc->base.primary); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - struct drm_framebuffer *fb; - struct i915_vma *vma; - - /* - * TODO: - * Disable planes if get_initial_plane_config() failed. - * Make sure things work if the surface base is not page aligned. - */ - if (!plane_config->fb) - return; - - if (intel_alloc_initial_plane_obj(crtc, plane_config)) { - fb = &plane_config->fb->base; - vma = plane_config->vma; - goto valid_fb; - } - - /* - * Failed to alloc the obj, check to see if we should share - * an fb with another CRTC instead - */ - if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma)) - goto valid_fb; - - /* - * We've failed to reconstruct the BIOS FB. Current display state - * indicates that the primary plane is visible, but has a NULL FB, - * which will lead to problems later if we don't fix it up. The - * simplest solution is to just disable the primary plane now and - * pretend the BIOS never had it enabled. 
- */ - intel_plane_disable_noatomic(crtc, plane); - if (crtc_state->bigjoiner) { - struct intel_crtc *slave = - crtc_state->bigjoiner_linked_crtc; - intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary)); - } - - return; - -valid_fb: - plane_state->uapi.rotation = plane_config->rotation; - intel_fb_fill_view(to_intel_framebuffer(fb), - plane_state->uapi.rotation, &plane_state->view); - - __i915_vma_pin(vma); - plane_state->ggtt_vma = i915_vma_get(vma); - if (intel_plane_uses_fence(plane_state) && - i915_vma_pin_fence(vma) == 0 && vma->fence) - plane_state->flags |= PLANE_HAS_FENCE; - - plane_state->uapi.src_x = 0; - plane_state->uapi.src_y = 0; - plane_state->uapi.src_w = fb->width << 16; - plane_state->uapi.src_h = fb->height << 16; - - plane_state->uapi.crtc_x = 0; - plane_state->uapi.crtc_y = 0; - plane_state->uapi.crtc_w = fb->width; - plane_state->uapi.crtc_h = fb->height; - - if (plane_config->tiling) - dev_priv->preserve_bios_swizzle = true; - - plane_state->uapi.fb = fb; - drm_framebuffer_get(fb); - - plane_state->uapi.crtc = &crtc->base; - intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc); - - intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); - - atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); + intel_plane_disable_arm(plane, crtc_state); + intel_crtc_wait_for_next_vblank(crtc); } unsigned int @@ -2212,26 +961,6 @@ unlock: clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); } -static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state) -{ - if (crtc_state->pch_pfit.enabled && - (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) || - crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) || - crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)) - return false; - - if (crtc_state->dsc.compression_enable) - return false; - - if (crtc_state->has_psr2) - return false; - - if (crtc_state->splitter.enable) - return false; - - return true; -} - static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -2255,19 +984,18 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) */ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; - if (IS_DG2(dev_priv)) { - /* - * Underrun recovery must always be disabled on DG2. However - * the chicken bit meaning is inverted compared to other - * platforms. - */ + /* + * Underrun recovery must always be disabled on display 13+. + * DG2 chicken bit meaning is inverted compared to other platforms. 
+ */ + if (IS_DG2(dev_priv)) tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; - } else if (DISPLAY_VER(dev_priv) >= 13) { - if (underrun_recovery_supported(crtc_state)) - tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP; - else - tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; - } + else if (DISPLAY_VER(dev_priv) >= 13) + tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; + + /* Wa_14010547955:dg2 */ + if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER)) + tmp |= DG2_RENDER_CCSTAG_4_3_EN; intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); } @@ -2289,7 +1017,7 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) if (cleanup_done) continue; - drm_crtc_wait_one_vblank(crtc); + intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc)); return true; } @@ -2297,207 +1025,6 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) return false; } -void lpt_disable_iclkip(struct drm_i915_private *dev_priv) -{ - u32 temp; - - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); - - mutex_lock(&dev_priv->sb_lock); - - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - temp |= SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -/* Program iCLKIP clock to the desired frequency */ -static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int clock = crtc_state->hw.adjusted_mode.crtc_clock; - u32 divsel, phaseinc, auxdiv, phasedir = 0; - u32 temp; - - lpt_disable_iclkip(dev_priv); - - /* The iCLK virtual clock root frequency is in MHz, - * but the adjusted_mode->crtc_clock is in KHz. To get the - * divisors, it is necessary to divide one by another, so we - * convert the virtual clock precision to KHz here for higher - * precision. 
- */ - for (auxdiv = 0; auxdiv < 2; auxdiv++) { - u32 iclk_virtual_root_freq = 172800 * 1000; - u32 iclk_pi_range = 64; - u32 desired_divisor; - - desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, - clock << auxdiv); - divsel = (desired_divisor / iclk_pi_range) - 2; - phaseinc = desired_divisor % iclk_pi_range; - - /* - * Near 20MHz is a corner case which is - * out of range for the 7-bit divisor - */ - if (divsel <= 0x7f) - break; - } - - /* This should not happen with any sane values */ - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & - ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & - ~SBI_SSCDIVINTPHASE_INCVAL_MASK); - - drm_dbg_kms(&dev_priv->drm, - "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", - clock, auxdiv, divsel, phasedir, phaseinc); - - mutex_lock(&dev_priv->sb_lock); - - /* Program SSCDIVINTPHASE6 */ - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); - temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; - temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); - temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; - temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); - temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); - temp |= SBI_SSCDIVINTPHASE_PROPAGATE; - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); - - /* Program SSCAUXDIV */ - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); - temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); - temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); - intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); - - /* Enable modulator and associated divider */ - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - temp &= ~SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); - - /* Wait for initialization time */ - udelay(24); - - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); -} - -int lpt_get_iclkip(struct drm_i915_private *dev_priv) -{ - u32 divsel, phaseinc, auxdiv; - u32 iclk_virtual_root_freq = 172800 * 1000; - u32 iclk_pi_range = 64; - u32 desired_divisor; - u32 temp; - - if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) - return 0; - - mutex_lock(&dev_priv->sb_lock); - - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - if (temp & SBI_SSCCTL_DISABLE) { - mutex_unlock(&dev_priv->sb_lock); - return 0; - } - - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); - divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> - SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; - phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> - SBI_SSCDIVINTPHASE_INCVAL_SHIFT; - - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); - auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> - SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; - - mutex_unlock(&dev_priv->sb_lock); - - desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; - - return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, - desired_divisor << auxdiv); -} - -static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, - enum pipe pch_transcoder) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - - intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), - intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), - intel_de_read(dev_priv, HBLANK(cpu_transcoder))); - 
intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), - intel_de_read(dev_priv, HSYNC(cpu_transcoder))); - - intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), - intel_de_read(dev_priv, VBLANK(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), - intel_de_read(dev_priv, VSYNC(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), - intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); -} - -static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) -{ - u32 temp; - - temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); - if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) - return; - - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & - FDI_RX_ENABLE); - drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & - FDI_RX_ENABLE); - - temp &= ~FDI_BC_BIFURCATION_SELECT; - if (enable) - temp |= FDI_BC_BIFURCATION_SELECT; - - drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", - enable ? "en" : "dis"); - intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); - intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); -} - -static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - switch (crtc->pipe) { - case PIPE_A: - break; - case PIPE_B: - if (crtc_state->fdi_lanes > 2) - cpt_set_fdi_bc_bifurcation(dev_priv, false); - else - cpt_set_fdi_bc_bifurcation(dev_priv, true); - - break; - case PIPE_C: - cpt_set_fdi_bc_bifurcation(dev_priv, true); - - break; - default: - BUG(); - } -} - /* * Finds the encoder associated with the given CRTC. This can only be * used when we know that the CRTC isn't feeding multiple encoders! 
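The next hunk makes intel_get_crtc_new_encoder() resolve the bigjoiner master before walking the connectors. The helper's definition is not part of this excerpt, but judging from the bigjoiner_linked_crtc handling removed earlier in this diff it plausibly amounts to:

	static inline struct intel_crtc *
	intel_master_crtc(const struct intel_crtc_state *crtc_state)
	{
		/* slaves carry a pointer back to their master crtc */
		if (crtc_state->bigjoiner_slave)
			return crtc_state->bigjoiner_linked_crtc;

		return to_intel_crtc(crtc_state->uapi.crtc);
	}

Connectors are only attached to the master pipe in a bigjoiner setup, so both the connector lookup and the "%d encoders for pipe %c" warning now consistently name that pipe.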
@@ -2506,15 +1033,17 @@ struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_connector_state *connector_state; const struct drm_connector *connector; struct intel_encoder *encoder = NULL; + struct intel_crtc *master_crtc; int num_encoders = 0; int i; + master_crtc = intel_master_crtc(crtc_state); + for_each_new_connector_in_state(&state->base, connector, connector_state, i) { - if (connector_state->crtc != &crtc->base) + if (connector_state->crtc != &master_crtc->base) continue; encoder = to_intel_encoder(connector_state->best_encoder); @@ -2523,119 +1052,11 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state, drm_WARN(encoder->base.dev, num_encoders != 1, "%d encoders for pipe %c\n", - num_encoders, pipe_name(crtc->pipe)); + num_encoders, pipe_name(master_crtc->pipe)); return encoder; } -/* - * Enable PCH resources required for PCH ports: - * - PCH PLLs - * - FDI training & RX/TX - * - update transcoder timings - * - DP transcoding bits - * - transcoder - */ -static void ilk_pch_enable(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - u32 temp; - - assert_pch_transcoder_disabled(dev_priv, pipe); - - if (IS_IVYBRIDGE(dev_priv)) - ivb_update_fdi_bc_bifurcation(crtc_state); - - /* Write the TU size bits before fdi link training, so that error - * detection works. */ - intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), - intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); - - /* For PCH output, training FDI link */ - dev_priv->display.fdi_link_train(crtc, crtc_state); - - /* We need to program the right clock selection before writing the pixel - * mutliplier into the DPLL. */ - if (HAS_PCH_CPT(dev_priv)) { - u32 sel; - - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); - temp |= TRANS_DPLL_ENABLE(pipe); - sel = TRANS_DPLLB_SEL(pipe); - if (crtc_state->shared_dpll == - intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) - temp |= sel; - else - temp &= ~sel; - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); - } - - /* XXX: pch pll's can be enabled any time before we enable the PCH - * transcoder, and we actually should do this to not upset any PCH - * transcoder that already use the clock when we share it. - * - * Note that enable_shared_dpll tries to do the right thing, but - * get_shared_dpll unconditionally resets the pll - we need that to have - * the right LVDS enable sequence. 
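One detail in the DP transcoder setup further down in this removed block: the pipe's bpc field is copied from PIPECONF bits 7:5 into TRANS_DP_CTL bits 11:9, keeping the same encoding (by i915 convention 0 = 8 bpc, 1 = 10 bpc, 2 = 6 bpc, 3 = 12 bpc; the encoding values are an assumption, not spelled out in the diff):

	u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) &
		   PIPECONF_BPC_MASK) >> 5;	/* field lives at bits 7:5 */

	temp &= ~TRANS_DP_BPC_MASK;
	temp |= bpc << 9;			/* same format but at 11:9 */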
*/ - intel_enable_shared_dpll(crtc_state); - - /* set transcoder timing, panel must allow it */ - assert_panel_unlocked(dev_priv, pipe); - ilk_pch_transcoder_set_timings(crtc_state, pipe); - - intel_fdi_normal_train(crtc); - - /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev_priv) && - intel_crtc_has_dp_encoder(crtc_state)) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; - i915_reg_t reg = TRANS_DP_CTL(pipe); - enum port port; - - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK | - TRANS_DP_BPC_MASK); - temp |= TRANS_DP_OUTPUT_ENABLE; - temp |= bpc << 9; /* same format but at 11:9 */ - - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; - - port = intel_get_crtc_new_encoder(state, crtc_state)->port; - drm_WARN_ON(dev, port < PORT_B || port > PORT_D); - temp |= TRANS_DP_PORT_SEL(port); - - intel_de_write(dev_priv, reg, temp); - } - - ilk_enable_pch_transcoder(crtc_state); -} - -void lpt_pch_enable(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - - assert_pch_transcoder_disabled(dev_priv, PIPE_A); - - lpt_program_iclkip(crtc_state); - - /* Set transcoder timing. */ - ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); - - lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); -} - static void cpt_verify_modeset(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -2743,7 +1164,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) } /* We need to wait for a vblank before we can disable the plane. */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) @@ -2842,6 +1263,46 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) return false; } +static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + + /* Wa_1604331009:icl,jsl,ehl */ + if (is_hdr_mode(crtc_state) && + crtc_state->active_planes & BIT(PLANE_CURSOR) && + DISPLAY_VER(dev_priv) == 11) + return true; + + return false; +} + +static void intel_async_flip_vtd_wa(struct drm_i915_private *i915, + enum pipe pipe, bool enable) +{ + if (DISPLAY_VER(i915) == 9) { + /* + * "Plane N strech max must be programmed to 11b (x1) + * when Async flips are enabled on that plane." + */ + intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), + SKL_PLANE1_STRETCH_MAX_MASK, + enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8); + } else { + /* Also needed on HSW/BDW albeit undocumented */ + intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe), + HSW_PRI_STRETCH_MAX_MASK, + enable ? 
HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8); + } +} + +static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + return crtc_state->uapi.async_flip && intel_vtd_active(i915) && + (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); +} + static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { @@ -2869,12 +1330,17 @@ static void intel_post_plane_update(struct intel_atomic_state *state, intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); if (new_crtc_state->update_wm_post && new_crtc_state->hw.active) - intel_update_watermarks(crtc); + intel_update_watermarks(dev_priv); if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) hsw_enable_ips(new_crtc_state); intel_fbc_post_update(state, crtc); + intel_drrs_page_flip(state, crtc); + + if (needs_async_flip_vtd_wa(old_crtc_state) && + !needs_async_flip_vtd_wa(new_crtc_state)) + intel_async_flip_vtd_wa(dev_priv, pipe, false); if (needs_nv12_wa(old_crtc_state) && !needs_nv12_wa(new_crtc_state)) @@ -2883,6 +1349,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state, if (needs_scalerclk_wa(old_crtc_state) && !needs_scalerclk_wa(new_crtc_state)) icl_wa_scalerclkgating(dev_priv, pipe, false); + + if (needs_cursorclk_wa(old_crtc_state) && + !needs_cursorclk_wa(new_crtc_state)) + icl_wa_cursorclkgating(dev_priv, pipe, false); + } static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, @@ -2924,7 +1395,6 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = @@ -2950,7 +1420,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, } if (need_vbl_wait) - intel_wait_for_vblank(i915, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void intel_pre_plane_update(struct intel_atomic_state *state, @@ -2963,11 +1433,17 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; + intel_psr_pre_plane_update(state, crtc); + if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) hsw_disable_ips(old_crtc_state); if (intel_fbc_pre_update(state, crtc)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); + + if (!needs_async_flip_vtd_wa(old_crtc_state) && + needs_async_flip_vtd_wa(new_crtc_state)) + intel_async_flip_vtd_wa(dev_priv, pipe, true); /* Display WA 827 */ if (!needs_nv12_wa(old_crtc_state) && @@ -2979,6 +1455,11 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, needs_scalerclk_wa(new_crtc_state)) icl_wa_scalerclkgating(dev_priv, pipe, true); + /* Wa_1604331009:icl,jsl,ehl */ + if (!needs_cursorclk_wa(old_crtc_state) && + needs_cursorclk_wa(new_crtc_state)) + icl_wa_cursorclkgating(dev_priv, pipe, true); + /* * Vblank time updates from the shadow to live plane control register * are blocked if the memory self-refresh mode is active at that @@ -2990,7 +1471,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active && new_crtc_state->disable_cxsr 
&& intel_set_memory_cxsr(dev_priv, false)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * IVB workaround: must disable low power watermarks for at least @@ -3001,7 +1482,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, */ if (old_crtc_state->hw.active && new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); /* * If we're doing a modeset we don't need to do any @@ -3022,10 +1503,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * we'll continue to update watermarks the old way, if flags tell * us to. */ - if (dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, crtc); - else if (new_crtc_state->update_wm_pre) - intel_update_watermarks(crtc); + if (!intel_initial_watermarks(state, crtc)) + if (new_crtc_state->update_wm_pre) + intel_update_watermarks(dev_priv); } /* @@ -3066,7 +1546,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - intel_disable_plane(plane, new_crtc_state); + intel_plane_disable_arm(plane, new_crtc_state); if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; @@ -3100,10 +1580,30 @@ intel_connector_primary_encoder(struct intel_connector *connector) static void intel_encoders_update_prepare(struct intel_atomic_state *state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; struct drm_connector_state *new_conn_state; struct drm_connector *connector; int i; + /* + * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. + * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 
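The pre/post plane-update hunks above all share one edge-triggered shape for workarounds such as Wa_1604331009 and the async-flip VT-d chicken bits: arm in intel_pre_plane_update() when the new state starts needing the workaround, disarm in intel_post_plane_update() once it stops. Schematically (needs_wa() and arm_wa() are placeholders for the pattern, not real i915 symbols):

	/* intel_pre_plane_update(): arm on the rising edge */
	if (!needs_wa(old_crtc_state) && needs_wa(new_crtc_state))
		arm_wa(dev_priv, pipe, true);

	/* intel_post_plane_update(): disarm on the falling edge */
	if (needs_wa(old_crtc_state) && !needs_wa(new_crtc_state))
		arm_wa(dev_priv, pipe, false);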
+ */ + if (i915->dpll.mgr) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (intel_crtc_needs_modeset(new_crtc_state)) + continue; + + new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; + new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; + } + } + + if (!state->modeset) + return; + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct intel_connector *intel_connector; @@ -3130,6 +1630,9 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state) struct drm_connector *connector; int i; + if (!state->modeset) + return; + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct intel_connector *intel_connector; @@ -3217,28 +1720,6 @@ static void intel_encoders_enable(struct intel_atomic_state *state, } } -static void intel_encoders_pre_disable(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - const struct drm_connector_state *old_conn_state; - struct drm_connector *conn; - int i; - - for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { - struct intel_encoder *encoder = - to_intel_encoder(old_conn_state->best_encoder); - - if (old_conn_state->crtc != &crtc->base) - continue; - - if (encoder->pre_disable) - encoder->pre_disable(state, encoder, old_crtc_state, - old_conn_state); - } -} - static void intel_encoders_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -3333,7 +1814,7 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - plane->disable_plane(plane, crtc_state); + plane->disable_arm(plane, crtc_state); } static void ilk_crtc_enable(struct intel_atomic_state *state, @@ -3360,9 +1841,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - if (new_crtc_state->has_pch_encoder) - intel_prepare_shared_dpll(new_crtc_state); - if (intel_crtc_has_dp_encoder(new_crtc_state)) intel_dp_set_m_n(new_crtc_state, M1_N1); @@ -3400,12 +1878,11 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, /* update DSPCNTR to configure gamma for pipe bottom color */ intel_disable_primary_plane(new_crtc_state); - if (dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, crtc); - intel_enable_pipe(new_crtc_state); + intel_initial_watermarks(state, crtc); + intel_enable_transcoder(new_crtc_state); if (new_crtc_state->has_pch_encoder) - ilk_pch_enable(state, new_crtc_state); + ilk_pch_enable(state, crtc); intel_crtc_vblank_on(new_crtc_state); @@ -3421,8 +1898,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, * in case there are more corner cases we don't know about. 
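The hunk that follows is one of many in this patch converting intel_wait_for_vblank(dev_priv, pipe) to the crtc-based intel_crtc_wait_for_next_vblank(crtc). The helper itself is not shown here; since an earlier hunk swaps a bare drm_crtc_wait_one_vblank() call for it in intel_has_pending_fb_unpin(), it is presumably a thin wrapper along these lines:

	void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc)
	{
		drm_crtc_wait_one_vblank(&crtc->base);
	}

The practical gain is that callers pass the crtc they already hold instead of carrying a (dev_priv, pipe) pair around.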
*/ if (new_crtc_state->has_pch_encoder) { - intel_wait_for_vblank(dev_priv, pipe); - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); + intel_crtc_wait_for_next_vblank(crtc); } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); @@ -3497,42 +1974,39 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(master->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *master_crtc_state; + struct intel_crtc *master_crtc; struct drm_connector_state *conn_state; struct drm_connector *conn; struct intel_encoder *encoder = NULL; int i; - if (crtc_state->bigjoiner_slave) - master = crtc_state->bigjoiner_linked_crtc; - - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + master_crtc = intel_master_crtc(crtc_state); + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); for_each_new_connector_in_state(&state->base, conn, conn_state, i) { - if (conn_state->crtc != &master->base) + if (conn_state->crtc != &master_crtc->base) continue; encoder = to_intel_encoder(conn_state->best_encoder); break; } - if (!crtc_state->bigjoiner_slave) { - /* need to enable VDSC, which we skipped in pre-enable */ - intel_dsc_enable(encoder, crtc_state); - } else { - /* - * Enable sequence steps 1-7 on bigjoiner master - */ - intel_encoders_pre_pll_enable(state, master); - if (master_crtc_state->shared_dpll) - intel_enable_shared_dpll(master_crtc_state); - intel_encoders_pre_enable(state, master); + /* + * Enable sequence steps 1-7 on bigjoiner master + */ + if (crtc_state->bigjoiner_slave) + intel_encoders_pre_pll_enable(state, master_crtc); - /* and DSC on slave */ - intel_dsc_enable(NULL, crtc_state); - } + if (crtc_state->shared_dpll) + intel_enable_shared_dpll(crtc_state); + + if (crtc_state->bigjoiner_slave) + intel_encoders_pre_enable(state, master_crtc); + + /* need to enable VDSC, which we skipped in pre-enable */ + intel_dsc_enable(crtc_state); if (DISPLAY_VER(dev_priv) >= 13) intel_uncompressed_joiner_enable(crtc_state); @@ -3578,10 +2052,9 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, &new_crtc_state->fdi_m_n, NULL); hsw_set_frame_start_delay(new_crtc_state); - } - if (!transcoder_is_dsi(cpu_transcoder)) - hsw_set_pipeconf(new_crtc_state); + hsw_set_transconf(new_crtc_state); + } crtc->active = true; @@ -3611,8 +2084,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) >= 11) icl_set_pipe_chicken(new_crtc_state); - if (dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, crtc); + intel_initial_watermarks(state, crtc); if (DISPLAY_VER(dev_priv) >= 11) { const struct intel_dbuf_state *dbuf_state = @@ -3627,7 +2099,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); } @@ -3635,8 +2107,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, * to change the workaround. 
*/ hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe; if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { - intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); - intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); + struct intel_crtc *wa_crtc; + + wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe); + + intel_crtc_wait_for_next_vblank(wa_crtc); + intel_crtc_wait_for_next_vblank(wa_crtc); } } @@ -3676,38 +2152,17 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, intel_crtc_vblank_off(old_crtc_state); - intel_disable_pipe(old_crtc_state); + intel_disable_transcoder(old_crtc_state); ilk_pfit_disable(old_crtc_state); if (old_crtc_state->has_pch_encoder) - ilk_fdi_disable(crtc); + ilk_pch_disable(state, crtc); intel_encoders_post_disable(state, crtc); - if (old_crtc_state->has_pch_encoder) { - ilk_disable_pch_transcoder(dev_priv, pipe); - - if (HAS_PCH_CPT(dev_priv)) { - i915_reg_t reg; - u32 temp; - - /* disable TRANS_DP_CTL */ - reg = TRANS_DP_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_PORT_SEL_MASK); - temp |= TRANS_DP_PORT_SEL_NONE; - intel_de_write(dev_priv, reg, temp); - - /* disable DPLL_SEL */ - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); - temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); - } - - ilk_fdi_pll_disable(crtc); - } + if (old_crtc_state->has_pch_encoder) + ilk_pch_post_disable(state, crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); @@ -3716,12 +2171,17 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, static void hsw_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + /* * FIXME collapse everything to one hook. * Need care with mst->ddi interactions. 
*/ - intel_encoders_disable(state, crtc); - intel_encoders_post_disable(state, crtc); + if (!old_crtc_state->bigjoiner_slave) { + intel_encoders_disable(state, crtc); + intel_encoders_post_disable(state, crtc); + } } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -3738,7 +2198,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) */ drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); - assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); + assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); intel_de_write(dev_priv, PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); @@ -3858,11 +2318,7 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - - if (intel_phy_is_tc(dev_priv, phy) && - dig_port->tc_mode == TC_PORT_TBT_ALT) { + if (intel_tc_port_in_tbt_alt_mode(dig_port)) { switch (dig_port->aux_ch) { case AUX_CH_C: return POWER_DOMAIN_AUX_C_TBT; @@ -3923,16 +2379,16 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct drm_encoder *encoder; enum pipe pipe = crtc->pipe; u64 mask; - enum transcoder transcoder = crtc_state->cpu_transcoder; if (!crtc_state->hw.active) return 0; mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); - mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); + mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder)); if (crtc_state->pch_pfit.enabled || crtc_state->pch_pfit.force_thru) mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); @@ -3951,7 +2407,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE); if (crtc_state->dsc.compression_enable) - mask |= BIT_ULL(intel_dsc_power_domain(crtc_state)); + mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder)); return mask; } @@ -4015,13 +2471,10 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_pll_enable(state, crtc); - if (IS_CHERRYVIEW(dev_priv)) { - chv_prepare_pll(crtc, new_crtc_state); - chv_enable_pll(crtc, new_crtc_state); - } else { - vlv_prepare_pll(crtc, new_crtc_state); - vlv_enable_pll(crtc, new_crtc_state); - } + if (IS_CHERRYVIEW(dev_priv)) + chv_enable_pll(new_crtc_state); + else + vlv_enable_pll(new_crtc_state); intel_encoders_pre_enable(state, crtc); @@ -4032,25 +2485,14 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, /* update DSPCNTR to configure gamma for pipe bottom color */ intel_disable_primary_plane(new_crtc_state); - dev_priv->display.initial_watermarks(state, crtc); - intel_enable_pipe(new_crtc_state); + intel_initial_watermarks(state, crtc); + intel_enable_transcoder(new_crtc_state); intel_crtc_vblank_on(new_crtc_state); intel_encoders_enable(state, crtc); } -static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - intel_de_write(dev_priv, FP0(crtc->pipe), - crtc_state->dpll_hw_state.fp0); - intel_de_write(dev_priv, FP1(crtc->pipe), - 
crtc_state->dpll_hw_state.fp1); -} - static void i9xx_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -4062,8 +2504,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - i9xx_set_pll_dividers(new_crtc_state); - if (intel_crtc_has_dp_encoder(new_crtc_state)) intel_dp_set_m_n(new_crtc_state, M1_N1); @@ -4079,7 +2519,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_enable(state, crtc); - i9xx_enable_pll(crtc, new_crtc_state); + i9xx_enable_pll(new_crtc_state); i9xx_pfit_enable(new_crtc_state); @@ -4088,11 +2528,9 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, /* update DSPCNTR to configure gamma for pipe bottom color */ intel_disable_primary_plane(new_crtc_state); - if (dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, crtc); - else - intel_update_watermarks(crtc); - intel_enable_pipe(new_crtc_state); + if (!intel_initial_watermarks(state, crtc)) + intel_update_watermarks(dev_priv); + intel_enable_transcoder(new_crtc_state); intel_crtc_vblank_on(new_crtc_state); @@ -4100,7 +2538,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, /* prevents spurious underruns */ if (DISPLAY_VER(dev_priv) == 2) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) @@ -4111,7 +2549,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) if (!old_crtc_state->gmch_pfit.control) return; - assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); + assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder); drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", intel_de_read(dev_priv, PFIT_CONTROL)); @@ -4131,13 +2569,13 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, * wait for planes to fully turn off before disabling the pipe. 
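In the hunks above, the call to the removed i9xx_set_pll_dividers() disappears while i9xx_enable_pll() loses its crtc argument. The natural reading (an assumption from the call-site change; the new body is outside this excerpt) is that the FP0/FP1 divisor writes now live inside the PLL enable path itself:

	static void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
	{
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

		/* program the feedback dividers before the DPLL itself */
		intel_de_write(dev_priv, FP0(crtc->pipe),
			       crtc_state->dpll_hw_state.fp0);
		intel_de_write(dev_priv, FP1(crtc->pipe),
			       crtc_state->dpll_hw_state.fp1);

		/* ... followed by the usual DPLL enable sequence ... */
	}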
*/ if (DISPLAY_VER(dev_priv) == 2) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); intel_encoders_disable(state, crtc); intel_crtc_vblank_off(old_crtc_state); - intel_disable_pipe(old_crtc_state); + intel_disable_transcoder(old_crtc_state); i9xx_pfit_disable(old_crtc_state); @@ -4157,8 +2595,8 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) != 2) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - if (!dev_priv->display.initial_watermarks) - intel_update_watermarks(crtc); + if (!dev_priv->wm_disp->initial_watermarks) + intel_update_watermarks(dev_priv); /* clock the pipe down to 640x480@60 to potentially save power */ if (IS_I830(dev_priv)) @@ -4211,7 +2649,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret); - dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc); + dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc); drm_atomic_state_put(state); @@ -4234,12 +2672,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, encoder->base.crtc = NULL; intel_fbc_disable(crtc); - intel_update_watermarks(crtc); + intel_update_watermarks(dev_priv); intel_disable_shared_dpll(crtc_state); intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains); - dev_priv->active_pipes &= ~BIT(pipe); cdclk_state->min_cdclk[pipe] = 0; cdclk_state->min_voltage_level[pipe] = 0; cdclk_state->active_pipes &= ~BIT(pipe); @@ -4596,13 +3033,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, } } - /* Cantiga+ cannot handle modes with a hsync front porch of 0. - * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. - */ - if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) && - pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay) - return -EINVAL; - intel_crtc_compute_pixel_rate(pipe_config); if (pipe_config->has_pch_encoder) @@ -5243,512 +3673,6 @@ out: return ret; } -static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - struct intel_encoder *encoder; - int i; - u32 val, final; - bool has_lvds = false; - bool has_cpu_edp = false; - bool has_panel = false; - bool has_ck505 = false; - bool can_ssc = false; - bool using_ssc_source = false; - - /* We need to take the global config into account */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - has_panel = true; - has_lvds = true; - break; - case INTEL_OUTPUT_EDP: - has_panel = true; - if (encoder->port == PORT_A) - has_cpu_edp = true; - break; - default: - break; - } - } - - if (HAS_PCH_IBX(dev_priv)) { - has_ck505 = dev_priv->vbt.display_clock_mode; - can_ssc = has_ck505; - } else { - has_ck505 = false; - can_ssc = true; - } - - /* Check if any DPLLs are using the SSC source */ - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { - u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); - - if (!(temp & DPLL_VCO_ENABLE)) - continue; - - if ((temp & PLL_REF_INPUT_MASK) == - PLLB_REF_INPUT_SPREADSPECTRUMIN) { - using_ssc_source = true; - break; - } - } - - drm_dbg_kms(&dev_priv->drm, - "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", - has_panel, has_lvds, has_ck505, using_ssc_source); - - /* Ironlake: try to setup display ref clock before DPLL - * enabling. This is only under driver's control after - * PCH B stepping, previous chipset stepping should be - * ignoring this setting. 
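The removed body that follows is a good example of a compute-then-compare idiom: build the complete desired register image first, return early if the hardware already matches, and only otherwise walk through the clock sources with settle delays. In outline:

	u32 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	u32 final = val;

	/* ... derive the wanted nonspread/SSC/CPU-output bits in final ... */

	if (final == val)
		return;		/* nothing to reprogram */

	/* otherwise apply in stages, each write posted and given 200us */
	intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
	intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
	udelay(200);

The trailing BUG_ON(val != final) then asserts that the staged writes converged on the precomputed image.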
- */ - val = intel_de_read(dev_priv, PCH_DREF_CONTROL); - - /* As we must carefully and slowly disable/enable each source in turn, - * compute the final state we want first and check if we need to - * make any changes at all. - */ - final = val; - final &= ~DREF_NONSPREAD_SOURCE_MASK; - if (has_ck505) - final |= DREF_NONSPREAD_CK505_ENABLE; - else - final |= DREF_NONSPREAD_SOURCE_ENABLE; - - final &= ~DREF_SSC_SOURCE_MASK; - final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - final &= ~DREF_SSC1_ENABLE; - - if (has_panel) { - final |= DREF_SSC_SOURCE_ENABLE; - - if (intel_panel_use_ssc(dev_priv) && can_ssc) - final |= DREF_SSC1_ENABLE; - - if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) - final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - else - final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - } else - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - } else if (using_ssc_source) { - final |= DREF_SSC_SOURCE_ENABLE; - final |= DREF_SSC1_ENABLE; - } - - if (final == val) - return; - - /* Always enable nonspread source */ - val &= ~DREF_NONSPREAD_SOURCE_MASK; - - if (has_ck505) - val |= DREF_NONSPREAD_CK505_ENABLE; - else - val |= DREF_NONSPREAD_SOURCE_ENABLE; - - if (has_panel) { - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_ENABLE; - - /* SSC must be turned on before enabling the CPU output */ - if (intel_panel_use_ssc(dev_priv) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); - val |= DREF_SSC1_ENABLE; - } else - val &= ~DREF_SSC1_ENABLE; - - /* Get SSC going before enabling the outputs */ - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - - /* Enable CPU source on CPU attached eDP */ - if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, - "Using SSC on eDP\n"); - val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - } else - val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - } else - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - } else { - drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); - - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - - /* Turn off CPU output */ - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - - if (!using_ssc_source) { - drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); - - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; - - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - } - } - - BUG_ON(val != final); -} - -static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - - if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - - if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); -} - -/* 
WaMPhyProgramming:hsw */ -static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); - tmp &= ~(0xFF << 24); - tmp |= (0x12 << 24); - intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); - tmp |= (1 << 11); - intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); - tmp |= (1 << 11); - intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); - tmp |= (1 << 24) | (1 << 21) | (1 << 18); - intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); - tmp |= (1 << 24) | (1 << 21) | (1 << 18); - intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); - tmp &= ~0xFF; - tmp |= 0x1C; - intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); - tmp &= ~0xFF; - tmp |= 0x1C; - intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); - tmp &= ~(0xFF << 16); - tmp |= (0x1C << 16); - intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); - tmp &= ~(0xFF << 16); - tmp |= (0x1C << 16); - intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); -} - -/* Implements 3 different sequences from BSpec chapter "Display iCLK - * Programming" based on the parameters passed: - * - Sequence to enable CLKOUT_DP - * - Sequence to enable CLKOUT_DP without spread - * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O - */ -static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, - bool with_spread, bool with_fdi) -{ - u32 reg, tmp; - - if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, - "FDI requires downspread\n")) - with_spread = true; - if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && - with_fdi, "LP PCH doesn't have FDI\n")) - with_fdi = false; - - mutex_lock(&dev_priv->sb_lock); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_DISABLE; - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - - udelay(24); - - if (with_spread) { - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - - if (with_fdi) { - lpt_reset_fdi_mphy(dev_priv); - lpt_program_fdi_mphy(dev_priv); - } - } - - reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); - tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -/* Sequence to disable CLKOUT_DP */ -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) -{ - u32 reg, tmp; - - mutex_lock(&dev_priv->sb_lock); - - reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); - tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - if (!(tmp & SBI_SSCCTL_DISABLE)) { - if (!(tmp & SBI_SSCCTL_PATHALT)) { - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - udelay(32); - } - tmp |= SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - } - - mutex_unlock(&dev_priv->sb_lock); -} - -#define BEND_IDX(steps) ((50 + (steps)) / 5) - -static const u16 sscdivintphase[] = { - [BEND_IDX( 50)] = 0x3B23, - [BEND_IDX( 45)] = 0x3B23, - [BEND_IDX( 40)] = 0x3C23, - [BEND_IDX( 35)] = 0x3C23, - [BEND_IDX( 30)] = 0x3D23, - [BEND_IDX( 25)] = 0x3D23, - [BEND_IDX( 20)] = 0x3E23, - [BEND_IDX( 15)] = 0x3E23, - [BEND_IDX( 10)] = 0x3F23, - [BEND_IDX( 5)] = 0x3F23, - [BEND_IDX( 0)] = 0x0025, - [BEND_IDX( -5)] = 0x0025, - [BEND_IDX(-10)] = 0x0125, - [BEND_IDX(-15)] = 0x0125, - [BEND_IDX(-20)] = 0x0225, - [BEND_IDX(-25)] = 0x0225, - [BEND_IDX(-30)] = 0x0325, - [BEND_IDX(-35)] = 0x0325, - [BEND_IDX(-40)] = 0x0425, - [BEND_IDX(-45)] = 0x0425, - [BEND_IDX(-50)] = 0x0525, -}; - -/* - * Bend CLKOUT_DP - * steps -50 to 50 inclusive, in steps of 5 - * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) - * change in clock period = -(steps / 10) * 5.787 ps - */ -static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) -{ - u32 tmp; - int idx = BEND_IDX(steps); - - if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) - return; - - if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) - return; - - mutex_lock(&dev_priv->sb_lock); - - if (steps % 10 != 0) - tmp = 0xAAAAAAAB; - else - tmp = 0x00000000; - intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); - - tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); - tmp &= 0xffff0000; - tmp |= sscdivintphase[idx]; - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -#undef BEND_IDX - -static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) -{ - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, SPLL_CTL); - - if ((ctl & SPLL_PLL_ENABLE) == 0) - return false; - - if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) - return true; - - if (IS_BROADWELL(dev_priv) && - (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) - return true; - - return false; -} - -static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, - enum intel_dpll_id id) -{ - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); - - if ((ctl & WRPLL_PLL_ENABLE) == 0) - return false; - - if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) - return true; - - if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && - (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) - return true; - - return false; -} - -static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - struct 
intel_encoder *encoder; - bool has_fdi = false; - - for_each_intel_encoder(&dev_priv->drm, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_ANALOG: - has_fdi = true; - break; - default: - break; - } - } - - /* - * The BIOS may have decided to use the PCH SSC - * reference so we must not disable it until the - * relevant PLLs have stopped relying on it. We'll - * just leave the PCH SSC reference enabled in case - * any active PLL is using it. It will get disabled - * after runtime suspend if we don't have FDI. - * - * TODO: Move the whole reference clock handling - * to the modeset sequence proper so that we can - * actually enable/disable/reconfigure these things - * safely. To do that we need to introduce a real - * clock hierarchy. That would also allow us to do - * clock bending finally. - */ - dev_priv->pch_ssc_use = 0; - - if (spll_uses_pch_ssc(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); - } - - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); - } - - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); - } - - if (dev_priv->pch_ssc_use) - return; - - if (has_fdi) { - lpt_bend_clkout_dp(dev_priv, 0); - lpt_enable_clkout_dp(dev_priv, true, true); - } else { - lpt_disable_clkout_dp(dev_priv); - } -} - -/* - * Initialize reference clocks when the driver loads - */ -void intel_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - ilk_init_pch_refclk(dev_priv); - else if (HAS_PCH_LPT(dev_priv)) - lpt_init_pch_refclk(dev_priv); -} - static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -5806,7 +3730,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) intel_de_posting_read(dev_priv, PIPECONF(pipe)); } -static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) +static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -5870,9 +3794,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) val |= PIPEMISC_YUV420_ENABLE | PIPEMISC_YUV420_MODE_FULL_BLEND; - if (DISPLAY_VER(dev_priv) >= 11 && - (crtc_state->active_planes & ~(icl_hdr_plane_mask() | - BIT(PLANE_CURSOR))) == 0) + if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) val |= PIPEMISC_HDR_MODE_PRECISION; if (DISPLAY_VER(dev_priv) >= 12) @@ -6015,8 +3937,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, &pipe_config->dp_m2_n2); } -static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, &pipe_config->fdi_m_n, NULL); @@ -6153,117 +4075,139 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pipe_color_config(pipe_config); intel_color_get_config(pipe_config); - if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { - struct intel_shared_dpll *pll; - enum intel_dpll_id pll_id; - bool pll_active; + pipe_config->pixel_multiplier = 1; + + ilk_pch_get_config(pipe_config); - 
pipe_config->has_pch_encoder = true; + intel_get_transcoder_timings(crtc, pipe_config); + intel_get_pipe_src_size(crtc, pipe_config); - tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe)); - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> - FDI_DP_PORT_WIDTH_SHIFT) + 1; + ilk_get_pfit_config(pipe_config); - ilk_get_fdi_m_n_config(crtc, pipe_config); + ret = true; - if (HAS_PCH_IBX(dev_priv)) { - /* - * The pipe->pch transcoder and pch transcoder->pll - * mapping is fixed. - */ - pll_id = (enum intel_dpll_id) crtc->pipe; - } else { - tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); - if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) - pll_id = DPLL_ID_PCH_PLL_B; +out: + intel_display_power_put(dev_priv, power_domain, wakeref); + + return ret; +} + +static u8 bigjoiner_pipes(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 12) + return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); + else if (DISPLAY_VER(i915) >= 11) + return BIT(PIPE_B) | BIT(PIPE_C); + else + return 0; +} + +static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; + u32 tmp = 0; + + power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); + + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); + + return tmp & TRANS_DDI_FUNC_ENABLE; +} + +static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv) +{ + u8 master_pipes = 0, slave_pipes = 0; + struct intel_crtc *crtc; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + + if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0) + continue; + + power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { + u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + + if (!(tmp & BIG_JOINER_ENABLE)) + continue; + + if (tmp & MASTER_BIG_JOINER_ENABLE) + master_pipes |= BIT(pipe); else - pll_id= DPLL_ID_PCH_PLL_A; + slave_pipes |= BIT(pipe); } - pipe_config->shared_dpll = - intel_get_shared_dpll_by_id(dev_priv, pll_id); - pll = pipe_config->shared_dpll; - - pll_active = intel_dpll_get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state); - drm_WARN_ON(dev, !pll_active); + if (DISPLAY_VER(dev_priv) < 13) + continue; - tmp = pipe_config->dpll_hw_state.dpll; - pipe_config->pixel_multiplier = - ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) - >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; + power_domain = POWER_DOMAIN_PIPE(pipe); + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { + u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); - ilk_pch_clock_get(crtc, pipe_config); - } else { - pipe_config->pixel_multiplier = 1; + if (tmp & UNCOMPRESSED_JOINER_MASTER) + master_pipes |= BIT(pipe); + if (tmp & UNCOMPRESSED_JOINER_SLAVE) + slave_pipes |= BIT(pipe); + } } - intel_get_transcoder_timings(crtc, pipe_config); - intel_get_pipe_src_size(crtc, pipe_config); + /* Bigjoiner pipes should always be consecutive master and slave */ + drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1, + "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", + master_pipes, slave_pipes); - ilk_get_pfit_config(pipe_config); + return slave_pipes; +} - ret = true; +static u8 hsw_panel_transcoders(struct drm_i915_private *i915) +{ + u8 
panel_transcoder_mask = BIT(TRANSCODER_EDP); -out: - intel_display_power_put(dev_priv, power_domain, wakeref); + if (DISPLAY_VER(i915) >= 11) + panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); - return ret; + return panel_transcoder_mask; } -static bool hsw_get_transcoder_state(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config, - struct intel_display_power_domain_set *power_domain_set) +static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); - unsigned long enabled_panel_transcoders = 0; - enum transcoder panel_transcoder; - u32 tmp; - - if (DISPLAY_VER(dev_priv) >= 11) - panel_transcoder_mask |= - BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); - - /* - * The pipe->transcoder mapping is fixed with the exception of the eDP - * and DSI transcoders handled below. - */ - pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; + u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv); + enum transcoder cpu_transcoder; + u8 enabled_transcoders = 0; /* * XXX: Do intel_display_power_get_if_enabled before reading this (for * consistency and less surprising code; it's in always on power). */ - for_each_cpu_transcoder_masked(dev_priv, panel_transcoder, + for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, panel_transcoder_mask) { - bool force_thru = false; + enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; enum pipe trans_pipe; + u32 tmp = 0; - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(panel_transcoder)); - if (!(tmp & TRANS_DDI_FUNC_ENABLE)) - continue; + power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); - /* - * Log all enabled ones, only use the first one. - * - * FIXME: This won't work for two separate DSI displays. 
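A small worked example for the consecutive-pipes assertion in enabled_bigjoiner_pipes() above (the pipe choices here are hypothetical): each bigjoiner master must drive the pipe directly after it, so the slave mask is exactly the master mask shifted left by one.

	u8 master_pipes = BIT(PIPE_A) | BIT(PIPE_C);	/* 0b0101 */
	u8 slave_pipes  = BIT(PIPE_B) | BIT(PIPE_D);	/* 0b1010 */

	/* consistent: master_pipes << 1 == 0b1010 == slave_pipes */

Any other combination trips the "Bigjoiner misconfigured" warning.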
- */ - enabled_panel_transcoders |= BIT(panel_transcoder); - if (enabled_panel_transcoders != BIT(panel_transcoder)) + if (!(tmp & TRANS_DDI_FUNC_ENABLE)) continue; switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: drm_WARN(dev, 1, "unknown pipe linked to transcoder %s\n", - transcoder_name(panel_transcoder)); + transcoder_name(cpu_transcoder)); fallthrough; case TRANS_DDI_EDP_INPUT_A_ONOFF: - force_thru = true; - fallthrough; case TRANS_DDI_EDP_INPUT_A_ON: trans_pipe = PIPE_A; break; @@ -6278,22 +4222,91 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, break; } - if (trans_pipe == crtc->pipe) { - pipe_config->cpu_transcoder = panel_transcoder; - pipe_config->pch_pfit.force_thru = force_thru; - } + if (trans_pipe == crtc->pipe) + enabled_transcoders |= BIT(cpu_transcoder); } + /* single pipe or bigjoiner master */ + cpu_transcoder = (enum transcoder) crtc->pipe; + if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) + enabled_transcoders |= BIT(cpu_transcoder); + + /* bigjoiner slave -> consider the master pipe's transcoder as well */ + if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) { + cpu_transcoder = (enum transcoder) crtc->pipe - 1; + if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) + enabled_transcoders |= BIT(cpu_transcoder); + } + + return enabled_transcoders; +} + +static bool has_edp_transcoders(u8 enabled_transcoders) +{ + return enabled_transcoders & BIT(TRANSCODER_EDP); +} + +static bool has_dsi_transcoders(u8 enabled_transcoders) +{ + return enabled_transcoders & (BIT(TRANSCODER_DSI_0) | + BIT(TRANSCODER_DSI_1)); +} + +static bool has_pipe_transcoders(u8 enabled_transcoders) +{ + return enabled_transcoders & ~(BIT(TRANSCODER_EDP) | + BIT(TRANSCODER_DSI_0) | + BIT(TRANSCODER_DSI_1)); +} + +static void assert_enabled_transcoders(struct drm_i915_private *i915, + u8 enabled_transcoders) +{ + /* Only one type of transcoder please */ + drm_WARN_ON(&i915->drm, + has_edp_transcoders(enabled_transcoders) + + has_dsi_transcoders(enabled_transcoders) + + has_pipe_transcoders(enabled_transcoders) > 1); + + /* Only DSI transcoders can be ganged */ + drm_WARN_ON(&i915->drm, + !has_dsi_transcoders(enabled_transcoders) && + !is_power_of_2(enabled_transcoders)); +} + +static bool hsw_get_transcoder_state(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config, + struct intel_display_power_domain_set *power_domain_set) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + unsigned long enabled_transcoders; + u32 tmp; + + enabled_transcoders = hsw_enabled_transcoders(crtc); + if (!enabled_transcoders) + return false; + + assert_enabled_transcoders(dev_priv, enabled_transcoders); + /* - * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 + * With the exception of DSI we should only ever have + * a single enabled transcoder. With DSI let's just + * pick the first one. 
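Given the assertion above (a single transcoder bit, or a pure DSI mask), choosing the active transcoder reduces to taking the lowest set bit, which is what the ffs() in the next hunk does. For example, with ganged DSI:

	u8 enabled = BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);

	/* ffs() is 1-based, so subtract 1 to get the bit index */
	enum transcoder cpu_transcoder = ffs(enabled) - 1;
	/* cpu_transcoder == TRANSCODER_DSI_0, the DSI master */

For every non-DSI case the is_power_of_2() check guarantees this picks the only enabled transcoder.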
*/ - drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && - enabled_panel_transcoders != BIT(TRANSCODER_EDP)); + pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1; if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) return false; + if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) { + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); + + if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF) + pipe_config->pch_pfit.force_thru = true; + } + tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); return tmp & PIPECONF_ENABLE; @@ -6345,45 +4358,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, return transcoder_is_dsi(pipe_config->cpu_transcoder); } -static void hsw_get_ddi_port_state(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - enum port port; - u32 tmp; - - if (transcoder_is_dsi(cpu_transcoder)) { - port = (cpu_transcoder == TRANSCODER_DSI_A) ? - PORT_A : PORT_B; - } else { - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if (!(tmp & TRANS_DDI_FUNC_ENABLE)) - return; - if (DISPLAY_VER(dev_priv) >= 12) - port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); - else - port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); - } - - /* - * Haswell has only FDI/PCH transcoder A. It is which is connected to - * DDI E. So just check whether this pipe is wired to DDI E and whether - * the PCH transcoder is on. - */ - if (DISPLAY_VER(dev_priv) < 9 && - (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) { - pipe_config->has_pch_encoder = true; - - tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> - FDI_DP_PORT_WIDTH_SHIFT) + 1; - - ilk_get_fdi_m_n_config(crtc, pipe_config); - } -} - static bool hsw_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -6410,21 +4384,12 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable) intel_uncompressed_joiner_get_config(pipe_config); - if (!active) { - /* bigjoiner slave doesn't enable transcoder */ - if (!pipe_config->bigjoiner_slave) - goto out; - - active = true; - pipe_config->pixel_multiplier = 1; + if (!active) + goto out; - /* we cannot read out most state, so don't bother.. */ - pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE; - } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || - DISPLAY_VER(dev_priv) >= 11) { - hsw_get_ddi_port_state(crtc, pipe_config); + if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || + DISPLAY_VER(dev_priv) >= 11) intel_get_transcoder_timings(crtc, pipe_config); - } if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) intel_vrr_get_config(crtc, pipe_config); @@ -6492,10 +4457,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } } - if (pipe_config->bigjoiner_slave) { - /* Cannot be read out as a slave, set to 0. 
*/ - pipe_config->pixel_multiplier = 0; - } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP && + if (pipe_config->cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = intel_de_read(dev_priv, @@ -6515,7 +4477,7 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (!i915->display.get_pipe_config(crtc, crtc_state)) + if (!i915->display->get_pipe_config(crtc, crtc_state)) return false; crtc_state->hw.active = true; @@ -6531,28 +4493,6 @@ static const struct drm_display_mode load_detect_mode = { 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), }; -struct drm_framebuffer * -intel_framebuffer_create(struct drm_i915_gem_object *obj, - struct drm_mode_fb_cmd2 *mode_cmd) -{ - struct intel_framebuffer *intel_fb; - int ret; - - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); - if (!intel_fb) - return ERR_PTR(-ENOMEM); - - ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); - if (ret) - goto err; - - return &intel_fb->base; - -err: - kfree(intel_fb); - return ERR_PTR(ret); -} - static int intel_modeset_disable_planes(struct drm_atomic_state *state, struct drm_crtc *crtc) { @@ -6714,7 +4654,8 @@ found: drm_atomic_state_put(state); /* let the connector get through one full cycle before testing */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); + return true; fail: @@ -6775,12 +4716,11 @@ static int i9xx_pll_refclk(struct drm_device *dev, } /* Returns the clock of the currently programmed mode of the given pipe. */ -static void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; struct dpll clock; @@ -6830,11 +4770,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, else port_clock = i9xx_calc_dpll_params(refclk, &clock); } else { - u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv, - LVDS); - bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); + enum pipe lvds_pipe; + + if (IS_I85X(dev_priv) && + intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && + lvds_pipe == crtc->pipe) { + u32 lvds = intel_de_read(dev_priv, LVDS); - if (is_lvds) { clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> DPLL_FPA01_P1_POST_DIV_SHIFT); @@ -6885,24 +4827,6 @@ int intel_dotclock_calculate(int link_freq, return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); } -static void ilk_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - /* read out port_clock from the DPLL */ - i9xx_crtc_clock_get(crtc, pipe_config); - - /* - * In case there is an active pipe without active ports, - * we may need some idea for the dotclock anyway. - * Calculate one based on the FDI configuration. - */ - pipe_config->hw.adjusted_mode.crtc_clock = - intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), - &pipe_config->fdi_m_n); -} - /* Returns the currently programmed mode of the given encoder. 
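/*
 * [Editor's illustrative aside] intel_dotclock_calculate() above recovers
 * the pixel clock from the DP link M/N ratio: dotclock = link_freq * M / N.
 * The 32x32 multiply must be widened to 64 bits first, which is what the
 * kernel's mul_u32_u32()/div_u64() pair provides. Standalone equivalent:
 */
#include <stdint.h>

static uint32_t dotclock_from_m_n(uint32_t link_freq_khz,
                                  uint32_t link_m, uint32_t link_n)
{
        if (link_n == 0)
                return 0;
        /* widen before multiplying so large M values cannot overflow */
        return (uint32_t)(((uint64_t)link_m * link_freq_khz) / link_n);
}
/* e.g. a 270000 kHz link with M/N = 22/30 yields a 198000 kHz dotclock
 * (numbers purely illustrative). */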
*/ struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder) @@ -6916,7 +4840,7 @@ intel_encoder_current_mode(struct intel_encoder *encoder) if (!encoder->get_hw_state(encoder, &pipe)) return NULL; - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -6985,27 +4909,27 @@ static bool needs_scaling(const struct intel_plane_state *state) } int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct intel_crtc_state *crtc_state, + struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, - struct intel_plane_state *plane_state) + struct intel_plane_state *new_plane_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); + struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - bool mode_changed = intel_crtc_needs_modeset(crtc_state); + bool mode_changed = intel_crtc_needs_modeset(new_crtc_state); bool was_crtc_enabled = old_crtc_state->hw.active; - bool is_crtc_enabled = crtc_state->hw.active; + bool is_crtc_enabled = new_crtc_state->hw.active; bool turn_off, turn_on, visible, was_visible; int ret; if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { - ret = skl_update_scaler_plane(crtc_state, plane_state); + ret = skl_update_scaler_plane(new_crtc_state, new_plane_state); if (ret) return ret; } was_visible = old_plane_state->uapi.visible; - visible = plane_state->uapi.visible; + visible = new_plane_state->uapi.visible; if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) was_visible = false; @@ -7021,7 +4945,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat * only combine the results from all planes in the current place? 
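/*
 * [Editor's illustrative aside] The plane bookkeeping that follows hinges
 * on a visibility delta: from old/new visibility plus whether a modeset is
 * required, the helper derives "turn_off"/"turn_on" events that drive the
 * cxsr and watermark updates. Hedged sketch of that derivation -- the
 * exact expressions live in unchanged context not shown in this hunk:
 */
#include <stdbool.h>

struct vis_delta {
        bool turn_on;
        bool turn_off;
};

static struct vis_delta plane_vis_delta(bool was_visible, bool visible,
                                        bool mode_changed)
{
        struct vis_delta d;

        /* a full modeset takes the plane down and brings it back up */
        d.turn_off = was_visible && (!visible || mode_changed);
        d.turn_on = visible && (!was_visible || mode_changed);
        return d;
}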
*/ if (!is_crtc_enabled) { - intel_plane_set_invisible(crtc_state, plane_state); + intel_plane_set_invisible(new_crtc_state, new_plane_state); visible = false; } @@ -7040,28 +4964,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat if (turn_on) { if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - crtc_state->update_wm_pre = true; + new_crtc_state->update_wm_pre = true; /* must disable cxsr around plane enable/disable */ if (plane->id != PLANE_CURSOR) - crtc_state->disable_cxsr = true; + new_crtc_state->disable_cxsr = true; } else if (turn_off) { if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - crtc_state->update_wm_post = true; + new_crtc_state->update_wm_post = true; /* must disable cxsr around plane enable/disable */ if (plane->id != PLANE_CURSOR) - crtc_state->disable_cxsr = true; - } else if (intel_wm_need_update(old_plane_state, plane_state)) { + new_crtc_state->disable_cxsr = true; + } else if (intel_wm_need_update(old_plane_state, new_plane_state)) { if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { /* FIXME bollocks */ - crtc_state->update_wm_pre = true; - crtc_state->update_wm_post = true; + new_crtc_state->update_wm_pre = true; + new_crtc_state->update_wm_post = true; } } if (visible || was_visible) - crtc_state->fb_bits |= plane->frontbuffer_bit; + new_crtc_state->fb_bits |= plane->frontbuffer_bit; /* * ILK/SNB DVSACNTR/Sprite Enable @@ -7100,8 +5024,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) && (turn_on || (!needs_scaling(old_plane_state) && - needs_scaling(plane_state)))) - crtc_state->disable_lp_wm = true; + needs_scaling(new_plane_state)))) + new_crtc_state->disable_lp_wm = true; return 0; } @@ -7237,6 +5161,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; linked_state->color_ctl = plane_state->color_ctl; linked_state->view = plane_state->view; + linked_state->decrypt = plane_state->decrypt; intel_plane_copy_hw_state(linked_state, plane_state); linked_state->uapi.src = plane_state->uapi.src; @@ -7244,13 +5169,13 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) if (icl_is_hdr_plane(dev_priv, plane->id)) { if (linked->id == PLANE_SPRITE5) - plane_state->cus_ctl |= PLANE_CUS_PLANE_7; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL; else if (linked->id == PLANE_SPRITE4) - plane_state->cus_ctl |= PLANE_CUS_PLANE_6; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL; else if (linked->id == PLANE_SPRITE3) - plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL; else if (linked->id == PLANE_SPRITE2) - plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL; + plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL; else MISSING_CASE(linked->id); } @@ -7363,10 +5288,8 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, crtc_state->update_wm_post = true; if (mode_changed && crtc_state->hw.enable && - dev_priv->display.crtc_compute_clock && - !crtc_state->bigjoiner_slave && !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { - ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); + ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state); if (ret) return ret; } @@ -7385,32 +5308,23 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, return ret; } - if (dev_priv->display.compute_pipe_wm) { - ret = 
dev_priv->display.compute_pipe_wm(state, crtc); - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "Target pipe watermarks are invalid\n"); - return ret; - } - + ret = intel_compute_pipe_wm(state, crtc); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "Target pipe watermarks are invalid\n"); + return ret; } - if (dev_priv->display.compute_intermediate_wm) { - if (drm_WARN_ON(&dev_priv->drm, - !dev_priv->display.compute_pipe_wm)) - return 0; - - /* - * Calculate 'intermediate' watermarks that satisfy both the - * old state and the new state. We can program these - * immediately. - */ - ret = dev_priv->display.compute_intermediate_wm(state, crtc); - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "No valid intermediate pipe watermarks are possible\n"); - return ret; - } + /* + * Calculate 'intermediate' watermarks that satisfy both the + * old state and the new state. We can program these + * immediately. + */ + ret = intel_compute_intermediate_wm(state, crtc); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "No valid intermediate pipe watermarks are possible\n"); + return ret; } if (DISPLAY_VER(dev_priv) >= 9) { @@ -7439,11 +5353,9 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, } - if (!mode_changed) { - ret = intel_psr2_sel_fetch_update(state, crtc); - if (ret) - return ret; - } + ret = intel_psr2_sel_fetch_update(state, crtc); + if (ret) + return ret; return 0; } @@ -7931,18 +5843,15 @@ static void intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { - const struct intel_crtc_state *from_crtc_state = crtc_state; - - if (crtc_state->bigjoiner_slave) { - from_crtc_state = intel_atomic_get_new_crtc_state(state, - crtc_state->bigjoiner_linked_crtc); + const struct intel_crtc_state *master_crtc_state; + struct intel_crtc *master_crtc; - /* No need to copy state if the master state is unchanged */ - if (!from_crtc_state) - return; - } + master_crtc = intel_master_crtc(crtc_state); + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); - intel_crtc_copy_color_blobs(crtc_state, from_crtc_state); + /* No need to copy state if the master state is unchanged */ + if (master_crtc_state) + intel_crtc_copy_color_blobs(crtc_state, master_crtc_state); } static void @@ -7985,7 +5894,6 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, const struct intel_crtc_state *from_crtc_state) { struct intel_crtc_state *saved_state; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL); if (!saved_state) @@ -8015,8 +5923,8 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0; crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc); crtc_state->bigjoiner_slave = true; - crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe; - crtc_state->has_audio = false; + crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder; + crtc_state->has_audio = from_crtc_state->has_audio; return 0; } @@ -8153,11 +6061,10 @@ encoder_retry: ret = encoder->compute_config(encoder, pipe_config, connector_state); + if (ret == -EDEADLK) + return ret; if (ret < 0) { - if (ret != -EDEADLK) - drm_dbg_kms(&i915->drm, - "Encoder config failure: %d\n", - ret); + drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret); return ret; } } @@ -8171,12 +6078,7 @@ encoder_retry: ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); if 
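/*
 * [Editor's illustrative aside] "Intermediate" watermarks above must be
 * safe for both the old and the new configuration, because they are
 * programmed before the plane update has completed. For levels expressed
 * as reserved-FIFO amounts that comes down to a pointwise maximum of the
 * two states' demands -- a hedged sketch only, the real per-platform
 * merge lives behind intel_compute_intermediate_wm():
 */
#include <stdint.h>

#define WM_LEVELS 5

struct wm_levels {
        uint16_t level[WM_LEVELS];      /* FIFO reserved per latency level */
};

static struct wm_levels intermediate_wm(const struct wm_levels *old_wm,
                                        const struct wm_levels *new_wm)
{
        struct wm_levels mid;
        int i;

        for (i = 0; i < WM_LEVELS; i++)
                mid.level[i] = old_wm->level[i] > new_wm->level[i] ?
                               old_wm->level[i] : new_wm->level[i];
        return mid;
}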
(ret == -EDEADLK) return ret; - if (ret < 0) { - drm_dbg_kms(&i915->drm, "CRTC fixup failed\n"); - return ret; - } - - if (ret == I915_DISPLAY_CONFIG_RETRY) { + if (ret == -EAGAIN) { if (drm_WARN(&i915->drm, !retry, "loop in pipe configuration computation\n")) return -EINVAL; @@ -8185,6 +6087,10 @@ encoder_retry: retry = false; goto encoder_retry; } + if (ret < 0) { + drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret); + return ret; + } /* Dithering seems to not pass-through bits correctly when it should, so * only enable it on 6bpc panels and when its not a compliance @@ -8614,51 +6520,48 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(output_types); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); - - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); - - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); - - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); - - PIPE_CONF_CHECK_I(pixel_multiplier); - + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); + + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); + + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(pixel_multiplier); + + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_INTERLACE); + + if 
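/*
 * [Editor's illustrative aside] The encoder_retry flow above separates
 * three outcomes: -EDEADLK is passed straight up so the atomic machinery
 * can back off and re-lock, -EAGAIN (replacing the old private
 * I915_DISPLAY_CONFIG_RETRY value) requests exactly one more pass with
 * the recomputed link configuration, and anything else is fatal. The same
 * retry-once shape in isolation (compute_config_once() is a hypothetical
 * stand-in):
 */
#include <errno.h>
#include <stdbool.h>

static int compute_config_once(void);   /* hypothetical stand-in */

static int compute_config_with_retry(void)
{
        bool retried = false;
        int ret;

again:
        ret = compute_config_once();
        if (ret == -EDEADLK)
                return ret;     /* never swallow the deadlock backoff */
        if (ret == -EAGAIN) {
                if (retried)
                        return -EINVAL; /* loop in configuration computation */
                retried = true;
                goto again;
        }
        return ret;
}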
(!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_INTERLACE); - - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PVSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NVSYNC); - } + DRM_MODE_FLAG_PHSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NHSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_PVSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NVSYNC); } PIPE_CONF_CHECK_I(output_format); @@ -8670,9 +6573,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL(has_infoframe); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) - PIPE_CONF_CHECK_BOOL(fec_enable); + PIPE_CONF_CHECK_BOOL(fec_enable); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); @@ -8701,9 +6602,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } PIPE_CONF_CHECK_I(scaler_state.scaler_id); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) - PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); PIPE_CONF_CHECK_X(gamma_mode); if (IS_CHERRYVIEW(dev_priv)) @@ -8720,19 +6619,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (bp_gamma) PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); - PIPE_CONF_CHECK_BOOL(has_psr); - PIPE_CONF_CHECK_BOOL(has_psr2); - PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); - PIPE_CONF_CHECK_I(dc3co_exitline); + if (current_config->active_planes) { + PIPE_CONF_CHECK_BOOL(has_psr); + PIPE_CONF_CHECK_BOOL(has_psr2); + PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch); + PIPE_CONF_CHECK_I(dc3co_exitline); + } } PIPE_CONF_CHECK_BOOL(double_wide); - if (dev_priv->dpll.mgr) + if (dev_priv->dpll.mgr) { PIPE_CONF_CHECK_P(shared_dpll); - /* FIXME do the readout properly and get rid of this quirk */ - if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); @@ -8766,21 +6665,19 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); } - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { - PIPE_CONF_CHECK_X(dsi_pll.ctrl); - PIPE_CONF_CHECK_X(dsi_pll.div); + PIPE_CONF_CHECK_X(dsi_pll.ctrl); + PIPE_CONF_CHECK_X(dsi_pll.div); - if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) - PIPE_CONF_CHECK_I(pipe_bpp); + if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) + PIPE_CONF_CHECK_I(pipe_bpp); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); - PIPE_CONF_CHECK_I(min_voltage_level); - } + PIPE_CONF_CHECK_I(min_voltage_level); - if (fastset && (current_config->has_psr || pipe_config->has_psr)) + if 
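/*
 * [Editor's illustrative aside] The PIPE_CONF_CHECK_* family used above
 * are macros that compare one named field between the sw-computed and the
 * read-back state, logging the field name via stringification on
 * mismatch. A minimal stand-in (struct and field names invented; like the
 * kernel macros it relies on a local "ok" flag in the caller):
 */
#include <stdbool.h>
#include <stdio.h>

struct pipe_cfg {
        int crtc_hdisplay;
        unsigned int flags;
};

#define CFG_CHECK_I(cur, new, field) do {                               \
        if ((cur)->field != (new)->field) {                             \
                fprintf(stderr, "mismatch in " #field                   \
                        " (expected %d, found %d)\n",                   \
                        (cur)->field, (new)->field);                    \
                ok = false;                                             \
        }                                                               \
} while (0)

/* only the bits in mask participate, like PIPE_CONF_CHECK_FLAGS */
#define CFG_CHECK_FLAGS(cur, new, field, mask) do {                     \
        if (((cur)->field ^ (new)->field) & (mask)) {                   \
                fprintf(stderr, "mismatch in " #field " (masked)\n");   \
                ok = false;                                             \
        }                                                               \
} while (0)

static bool pipe_cfg_compare(const struct pipe_cfg *cur,
                             const struct pipe_cfg *new)
{
        bool ok = true;

        CFG_CHECK_I(cur, new, crtc_hdisplay);
        CFG_CHECK_FLAGS(cur, new, flags, 0x1u);
        return ok;
}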
(current_config->has_psr || pipe_config->has_psr) PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ~intel_hdmi_infoframe_enable(DP_SDP_VSC)); else @@ -9052,7 +6949,7 @@ verify_crtc_state(struct intel_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc_state *pipe_config = old_crtc_state; struct drm_atomic_state *state = old_crtc_state->uapi.state; - struct intel_crtc *master = crtc; + struct intel_crtc *master_crtc; __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); intel_crtc_free_hw_state(old_crtc_state); @@ -9080,10 +6977,9 @@ verify_crtc_state(struct intel_crtc *crtc, "(expected %i, found %i)\n", new_crtc_state->hw.active, crtc->active); - if (new_crtc_state->bigjoiner_slave) - master = new_crtc_state->bigjoiner_linked_crtc; + master_crtc = intel_master_crtc(new_crtc_state); - for_each_encoder_on_crtc(dev, &master->base, encoder) { + for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) { enum pipe pipe; bool active; @@ -9093,7 +6989,7 @@ verify_crtc_state(struct intel_crtc *crtc, encoder->base.base.id, active, new_crtc_state->hw.active); - I915_STATE_WARN(active && master->pipe != pipe, + I915_STATE_WARN(active && master_crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); @@ -9104,10 +7000,6 @@ verify_crtc_state(struct intel_crtc *crtc, if (!new_crtc_state->hw.active) return; - if (new_crtc_state->bigjoiner_slave) - /* No PLLs set for slave */ - pipe_config->shared_dpll = NULL; - intel_pipe_config_sanity_check(dev_priv, pipe_config); if (!intel_pipe_config_compare(new_crtc_state, @@ -9226,9 +7118,6 @@ verify_mpllb_state(struct intel_atomic_state *state, if (!new_crtc_state->hw.active) return; - if (new_crtc_state->bigjoiner_slave) - return; - encoder = intel_get_crtc_new_encoder(state, new_crtc_state); intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state); @@ -9402,7 +7291,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state) struct intel_crtc *crtc; int i; - if (!dev_priv->display.crtc_compute_clock) + if (!dev_priv->dpll_funcs) return; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { @@ -9503,23 +7392,6 @@ static int intel_modeset_checks(struct intel_atomic_state *state) return 0; } -/* - * Handle calculation of various watermark data at the end of the atomic check - * phase. The code here should be run after the per-crtc and per-plane 'check' - * handlers to ensure that all derived state has been updated. - */ -static int calc_watermark_data(struct intel_atomic_state *state) -{ - struct drm_device *dev = state->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - /* Is there platform-specific watermark information to calculate? 
*/ - if (dev_priv->display.compute_global_watermarks) - return dev_priv->display.compute_global_watermarks(state); - - return 0; -} - static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { @@ -9684,59 +7556,6 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state) return 0; } -static int intel_atomic_check_cdclk(struct intel_atomic_state *state, - bool *need_cdclk_calc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_cdclk_state *old_cdclk_state; - const struct intel_cdclk_state *new_cdclk_state; - struct intel_plane_state *plane_state; - struct intel_bw_state *new_bw_state; - struct intel_plane *plane; - int min_cdclk = 0; - enum pipe pipe; - int ret; - int i; - /* - * active_planes bitmask has been updated, and potentially - * affected planes are part of the state. We can now - * compute the minimum cdclk for each plane. - */ - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); - if (ret) - return ret; - } - - old_cdclk_state = intel_atomic_get_old_cdclk_state(state); - new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - - if (new_cdclk_state && - old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) - *need_cdclk_calc = true; - - ret = dev_priv->display.bw_calc_min_cdclk(state); - if (ret) - return ret; - - new_bw_state = intel_atomic_get_new_bw_state(state); - - if (!new_cdclk_state || !new_bw_state) - return 0; - - for_each_pipe(dev_priv, pipe) { - min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk); - - /* - * Currently do this change only if we need to increase - */ - if (new_bw_state->min_cdclk > min_cdclk) - *need_cdclk_calc = true; - } - - return 0; -} - static int intel_atomic_check_crtcs(struct intel_atomic_state *state) { struct intel_crtc_state *crtc_state; @@ -9782,13 +7601,13 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state) { struct intel_crtc_state *slave_crtc_state, *master_crtc_state; - struct intel_crtc *slave, *master; + struct intel_crtc *slave_crtc, *master_crtc; /* slave being enabled, is master is still claiming this crtc? 
*/ if (old_crtc_state->bigjoiner_slave) { - slave = crtc; - master = old_crtc_state->bigjoiner_linked_crtc; - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + slave_crtc = crtc; + master_crtc = old_crtc_state->bigjoiner_linked_crtc; + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state)) goto claimed; } @@ -9796,17 +7615,17 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, if (!new_crtc_state->bigjoiner) return 0; - slave = intel_dsc_get_bigjoiner_secondary(crtc); - if (!slave) { + slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc); + if (!slave_crtc) { DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires " "CRTC + 1 to be used, doesn't exist\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } - new_crtc_state->bigjoiner_linked_crtc = slave; - slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave); - master = crtc; + new_crtc_state->bigjoiner_linked_crtc = slave_crtc; + slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); + master_crtc = crtc; if (IS_ERR(slave_crtc_state)) return PTR_ERR(slave_crtc_state); @@ -9815,15 +7634,15 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, goto claimed; DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n", - slave->base.base.id, slave->base.name); + slave_crtc->base.base.id, slave_crtc->base.name); return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state); claimed: DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but " "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", - slave->base.base.id, slave->base.name, - master->base.base.id, master->base.name); + slave_crtc->base.base.id, slave_crtc->base.name, + master_crtc->base.base.id, master_crtc->base.name); return -EINVAL; } @@ -9857,35 +7676,37 @@ static void kill_bigjoiner_slave(struct intel_atomic_state *state, * correspond to the last vblank and have no relation to the actual time when * the flip done event was sent. */ -static int intel_atomic_check_async(struct intel_atomic_state *state) +static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state, *new_crtc_state; const struct intel_plane_state *new_plane_state, *old_plane_state; - struct intel_crtc *crtc; struct intel_plane *plane; int i; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (intel_crtc_needs_modeset(new_crtc_state)) { - drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); - return -EINVAL; - } + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (!new_crtc_state->hw.active) { - drm_dbg_kms(&i915->drm, "CRTC inactive\n"); - return -EINVAL; - } - if (old_crtc_state->active_planes != new_crtc_state->active_planes) { - drm_dbg_kms(&i915->drm, - "Active planes cannot be changed during async flip\n"); - return -EINVAL; - } + if (intel_crtc_needs_modeset(new_crtc_state)) { + drm_dbg_kms(&i915->drm, "Modeset Required. 
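/*
 * [Editor's illustrative aside] The bigjoiner rules this check enforces:
 * two adjacent pipes drive one output, the master owns the transcoder and
 * PLL, and the slave is always the very next pipe ("CRTC + 1") -- which
 * is also why the readout earlier probes "crtc->pipe - 1" when a pipe is
 * a bigjoiner slave. Sketch of the pairing rule:
 */
enum pipe_id { PIPE_A, PIPE_B, PIPE_C, PIPE_D, NUM_PIPES };

static int bigjoiner_slave_pipe(enum pipe_id master)
{
        return master + 1 < NUM_PIPES ? master + 1 : -1;
}

static int bigjoiner_master_pipe(enum pipe_id slave)
{
        return slave > PIPE_A ? slave - 1 : -1;
}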
Async flip not supported\n"); + return -EINVAL; + } + + if (!new_crtc_state->hw.active) { + drm_dbg_kms(&i915->drm, "CRTC inactive\n"); + return -EINVAL; + } + if (old_crtc_state->active_planes != new_crtc_state->active_planes) { + drm_dbg_kms(&i915->drm, + "Active planes cannot be changed during async flip\n"); + return -EINVAL; } for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + if (plane->pipe != crtc->pipe) + continue; + /* * TODO: Async flip is only supported through the page flip IOCTL * as of now. So support currently added for primary plane only. @@ -9912,8 +7733,14 @@ static int intel_atomic_check_async(struct intel_atomic_state *state) return -EINVAL; } - if (old_plane_state->view.color_plane[0].stride != - new_plane_state->view.color_plane[0].stride) { + if (new_plane_state->hw.fb->format->num_planes > 1) { + drm_dbg_kms(&i915->drm, + "Planar formats not supported with async flips\n"); + return -EINVAL; + } + + if (old_plane_state->view.color_plane[0].mapping_stride != + new_plane_state->view.color_plane[0].mapping_stride) { drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n"); return -EINVAL; } @@ -9967,6 +7794,10 @@ static int intel_atomic_check_async(struct intel_atomic_state *state) drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n"); return -EINVAL; } + + /* plane decryption is allow to change only in synchronous flips */ + if (old_plane_state->decrypt != new_plane_state->decrypt) + return -EINVAL; } return 0; @@ -10165,8 +7996,7 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - intel_fbc_choose_crtc(dev_priv, state); - ret = calc_watermark_data(state); + ret = intel_compute_global_watermarks(state); if (ret) goto fail; @@ -10174,7 +8004,7 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - ret = intel_atomic_check_cdclk(state, &any_ms); + ret = intel_cdclk_atomic_check(state, &any_ms); if (ret) goto fail; @@ -10197,10 +8027,14 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; + ret = intel_fbc_atomic_check(state); + if (ret) + goto fail; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) { - ret = intel_atomic_check_async(state); + ret = intel_atomic_check_async(state, crtc); if (ret) goto fail; } @@ -10336,12 +8170,11 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state, if (new_crtc_state->update_pipe) intel_pipe_fastset(old_crtc_state, new_crtc_state); - - intel_psr2_program_trans_man_trk_ctl(new_crtc_state); } - if (dev_priv->display.atomic_update_watermarks) - dev_priv->display.atomic_update_watermarks(state, crtc); + intel_psr2_program_trans_man_trk_ctl(new_crtc_state); + + intel_atomic_update_watermarks(state, crtc); } static void commit_pipe_post_planes(struct intel_atomic_state *state, @@ -10373,7 +8206,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state, intel_crtc_update_active_timings(new_crtc_state); - dev_priv->display.crtc_enable(state, crtc); + dev_priv->display->crtc_enable(state, crtc); if (new_crtc_state->bigjoiner_slave) return; @@ -10404,10 +8237,9 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_encoders_update_pipe(state, crtc); } - if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) - intel_fbc_disable(crtc); - else - intel_fbc_enable(state, crtc); + intel_fbc_update(state, crtc); + + intel_update_planes_on_crtc(state, crtc); /* Perform vblank 
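/*
 * [Editor's illustrative aside] An async flip may only swap the scanout
 * address, so everything the hardware latched earlier has to stay
 * identical; the hunk above rejects planar formats, stride changes,
 * color-range changes and (new here) decryption changes. Condensed
 * sketch with simplified field names:
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct flip_props {
        int format_planes;      /* color planes of the fb format */
        uint32_t mapping_stride;
        int color_range;
        bool decrypt;
};

static int validate_async_flip(const struct flip_props *old_p,
                               const struct flip_props *new_p)
{
        if (new_p->format_planes > 1)
                return -EINVAL; /* planar formats not supported */
        if (old_p->mapping_stride != new_p->mapping_stride)
                return -EINVAL; /* stride cannot change */
        if (old_p->color_range != new_p->color_range)
                return -EINVAL; /* color range cannot change */
        if (old_p->decrypt != new_p->decrypt)
                return -EINVAL; /* decryption only changes synchronously */
        return 0;
}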
evasion around commit operation */ intel_pipe_update_start(new_crtc_state); @@ -10415,9 +8247,9 @@ static void intel_update_crtc(struct intel_atomic_state *state, commit_pipe_pre_planes(state, crtc); if (DISPLAY_VER(dev_priv) >= 9) - skl_update_planes_on_crtc(state, crtc); + skl_arm_planes_on_crtc(state, crtc); else - i9xx_update_planes_on_crtc(state, crtc); + i9xx_arm_planes_on_crtc(state, crtc); commit_pipe_post_planes(state, crtc); @@ -10441,39 +8273,21 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave); - - intel_encoders_pre_disable(state, crtc); - - intel_crtc_disable_planes(state, crtc); - - /* - * We still need special handling for disabling bigjoiner master - * and slaves since for slave we do not have encoder or plls - * so we dont need to disable those. - */ - if (old_crtc_state->bigjoiner) { - intel_crtc_disable_planes(state, - old_crtc_state->bigjoiner_linked_crtc); - old_crtc_state->bigjoiner_linked_crtc->active = false; - } - /* * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. */ intel_crtc_disable_pipe_crc(crtc); - dev_priv->display.crtc_disable(state, crtc); + dev_priv->display->crtc_disable(state, crtc); crtc->active = false; intel_fbc_disable(crtc); intel_disable_shared_dpll(old_crtc_state); /* FIXME unify this for all platforms */ if (!new_crtc_state->hw.active && - !HAS_GMCH(dev_priv) && - dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, crtc); + !HAS_GMCH(dev_priv)) + intel_initial_watermarks(state, crtc); } static void intel_commit_modeset_disables(struct intel_atomic_state *state) @@ -10483,10 +8297,22 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) u32 handled = 0; int i; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!intel_crtc_needs_modeset(new_crtc_state)) + continue; + + if (!old_crtc_state->hw.active) + continue; + + intel_pre_plane_update(state, crtc); + intel_crtc_disable_planes(state, crtc); + } + /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) + if (!intel_crtc_needs_modeset(new_crtc_state)) continue; if (!old_crtc_state->hw.active) @@ -10498,10 +8324,10 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) * Slave vblanks are masked till Master Vblanks. 
*/ if (!is_trans_port_sync_slave(old_crtc_state) && - !intel_dp_mst_is_slave_trans(old_crtc_state)) + !intel_dp_mst_is_slave_trans(old_crtc_state) && + !old_crtc_state->bigjoiner_slave) continue; - intel_pre_plane_update(state, crtc); intel_old_crtc_state_disables(state, old_crtc_state, new_crtc_state, crtc); handled |= BIT(crtc->pipe); @@ -10511,21 +8337,14 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state) || - (handled & BIT(crtc->pipe)) || - old_crtc_state->bigjoiner_slave) + (handled & BIT(crtc->pipe))) continue; - intel_pre_plane_update(state, crtc); - if (old_crtc_state->bigjoiner) { - struct intel_crtc *slave = - old_crtc_state->bigjoiner_linked_crtc; - - intel_pre_plane_update(state, slave); - } + if (!old_crtc_state->hw.active) + continue; - if (old_crtc_state->hw.active) - intel_old_crtc_state_disables(state, old_crtc_state, - new_crtc_state, crtc); + intel_old_crtc_state_disables(state, old_crtc_state, + new_crtc_state, crtc); } } @@ -10603,7 +8422,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, &old_crtc_state->wm.skl.ddb) && (update_pipes | modeset_pipes)) - intel_wait_for_vblank(dev_priv, pipe); + intel_crtc_wait_for_next_vblank(crtc); } } @@ -10745,10 +8564,14 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct drm_framebuffer *fb = plane_state->hw.fb; + int cc_plane; int ret; - if (!fb || - fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) + if (!fb) + continue; + + cc_plane = intel_fb_rc_ccs_cc_plane(fb); + if (cc_plane < 0) continue; /* @@ -10765,7 +8588,7 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s * GPU write on it. */ ret = i915_gem_object_read_from_page(intel_fb_obj(fb), - fb->offsets[2] + 16, + fb->offsets[cc_plane] + 16, &plane_state->ccval, sizeof(plane_state->ccval)); /* The above could only fail if the FB obj has an unexpected backing store type. */ @@ -10833,8 +8656,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } } - if (state->modeset) - intel_encoders_update_prepare(state); + intel_encoders_update_prepare(state); intel_dbuf_pre_plane_update(state); @@ -10844,13 +8666,14 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } /* Now enable the clocks, plane, pipe, and connectors that we set up. */ - dev_priv->display.commit_modeset_enables(state); + dev_priv->display->commit_modeset_enables(state); - if (state->modeset) { - intel_encoders_update_complete(state); + intel_encoders_update_complete(state); + if (state->modeset) intel_set_cdclk_post_plane_update(state); - } + + intel_wait_for_vblank_workers(state); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. 
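/*
 * [Editor's illustrative aside] For CCS clear-color framebuffers the
 * per-frame clear value lives in the buffer itself and may have been
 * produced by the GPU, so the hunk above reads it back from the
 * clear-color plane at byte offset 16 right before the commit. Sketch of
 * that fixed-layout read:
 */
#include <stdint.h>
#include <string.h>

#define CLEAR_COLOR_VALUE_OFFSET 16     /* matches the readback above */

static uint64_t read_clear_color(const uint8_t *cc_plane)
{
        uint64_t ccval;

        /* memcpy avoids alignment assumptions about the mapping */
        memcpy(&ccval, cc_plane + CLEAR_COLOR_VALUE_OFFSET, sizeof(ccval));
        return ccval;
}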
To @@ -10866,13 +8689,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) intel_crtc_disable_flip_done(state, crtc); - - if (new_crtc_state->hw.active && - !intel_crtc_needs_modeset(new_crtc_state) && - !new_crtc_state->preload_luts && - (new_crtc_state->uapi.color_mgmt_changed || - new_crtc_state->update_pipe)) - intel_color_load_luts(new_crtc_state); } /* @@ -10895,11 +8711,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state)) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); - if (dev_priv->display.optimize_watermarks) - dev_priv->display.optimize_watermarks(state, crtc); + intel_optimize_watermarks(state, crtc); } intel_dbuf_post_plane_update(state); + intel_psr_post_plane_update(state); for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); @@ -10959,7 +8775,7 @@ static void intel_atomic_commit_work(struct work_struct *work) intel_atomic_commit_tail(state); } -static int __i915_sw_fence_call +static int intel_atomic_commit_ready(struct i915_sw_fence *fence, enum i915_sw_fence_notify notify) { @@ -11088,280 +8904,6 @@ static int intel_atomic_commit(struct drm_device *dev, return 0; } -struct wait_rps_boost { - struct wait_queue_entry wait; - - struct drm_crtc *crtc; - struct i915_request *request; -}; - -static int do_rps_boost(struct wait_queue_entry *_wait, - unsigned mode, int sync, void *key) -{ - struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); - struct i915_request *rq = wait->request; - - /* - * If we missed the vblank, but the request is already running it - * is reasonable to assume that it will complete before the next - * vblank without our intervention, so leave RPS alone. 
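/*
 * [Editor's illustrative aside] The wait_rps_boost machinery removed
 * below arms a one-shot entry on the vblank waitqueue: when the vblank
 * fires, it boosts GPU clocks only if the flip's render request has not
 * started yet -- a request that is already running is assumed to finish
 * before the next vblank on its own. Generic shape of such a self-freeing
 * one-shot callback, deliberately outside the kernel waitqueue API:
 */
#include <stdbool.h>
#include <stdlib.h>

struct one_shot_boost {
        bool (*request_started)(void *req);     /* hypothetical probes */
        void (*boost)(void *req);
        void *req;
};

/* invoked exactly once when the waited-for event (the vblank) fires */
static void one_shot_boost_fire(struct one_shot_boost *w)
{
        if (!w->request_started(w->req))
                w->boost(w->req);
        free(w);        /* like do_rps_boost(): remove and free the entry */
}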
- */ - if (!i915_request_started(rq)) - intel_rps_boost(rq); - i915_request_put(rq); - - drm_crtc_vblank_put(wait->crtc); - - list_del(&wait->wait.entry); - kfree(wait); - return 1; -} - -static void add_rps_boost_after_vblank(struct drm_crtc *crtc, - struct dma_fence *fence) -{ - struct wait_rps_boost *wait; - - if (!dma_fence_is_i915(fence)) - return; - - if (DISPLAY_VER(to_i915(crtc->dev)) < 6) - return; - - if (drm_crtc_vblank_get(crtc)) - return; - - wait = kmalloc(sizeof(*wait), GFP_KERNEL); - if (!wait) { - drm_crtc_vblank_put(crtc); - return; - } - - wait->request = to_request(dma_fence_get(fence)); - wait->crtc = crtc; - - wait->wait.func = do_rps_boost; - wait->wait.flags = 0; - - add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); -} - -int intel_plane_pin_fb(struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - struct drm_framebuffer *fb = plane_state->hw.fb; - struct i915_vma *vma; - bool phys_cursor = - plane->id == PLANE_CURSOR && - INTEL_INFO(dev_priv)->display.cursor_needs_physical; - - if (!intel_fb_uses_dpt(fb)) { - vma = intel_pin_and_fence_fb_obj(fb, phys_cursor, - &plane_state->view.gtt, - intel_plane_uses_fence(plane_state), - &plane_state->flags); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - plane_state->ggtt_vma = vma; - } else { - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - - vma = intel_dpt_pin(intel_fb->dpt_vm); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - plane_state->ggtt_vma = vma; - - vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false, - &plane_state->flags, intel_fb->dpt_vm); - if (IS_ERR(vma)) { - intel_dpt_unpin(intel_fb->dpt_vm); - plane_state->ggtt_vma = NULL; - return PTR_ERR(vma); - } - - plane_state->dpt_vma = vma; - - WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma); - } - - return 0; -} - -void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) -{ - struct drm_framebuffer *fb = old_plane_state->hw.fb; - struct i915_vma *vma; - - if (!intel_fb_uses_dpt(fb)) { - vma = fetch_and_zero(&old_plane_state->ggtt_vma); - if (vma) - intel_unpin_fb_vma(vma, old_plane_state->flags); - } else { - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - - vma = fetch_and_zero(&old_plane_state->dpt_vma); - if (vma) - intel_unpin_fb_vma(vma, old_plane_state->flags); - - vma = fetch_and_zero(&old_plane_state->ggtt_vma); - if (vma) - intel_dpt_unpin(intel_fb->dpt_vm); - } -} - -/** - * intel_prepare_plane_fb - Prepare fb for usage on plane - * @_plane: drm plane to prepare for - * @_new_plane_state: the plane state being prepared - * - * Prepares a framebuffer for usage on a display plane. Generally this - * involves pinning the underlying object and updating the frontbuffer tracking - * bits. Some older platforms need special physical address handling for - * cursor planes. - * - * Returns 0 on success, negative error code on failure. 
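/*
 * [Editor's illustrative aside] intel_plane_pin_fb() above (removed from
 * this file in the reorganisation) is a two-stage acquire for DPT
 * framebuffers -- pin the display page table itself, then bind the fb
 * through it -- and must unwind stage one when stage two fails. Generic
 * sketch of that pattern with opaque, hypothetical stand-in helpers:
 */
#include <errno.h>
#include <stddef.h>

struct vma;
static struct vma *dpt_pin(void);                       /* hypothetical */
static struct vma *fb_bind_in_dpt(struct vma *dpt);     /* hypothetical */
static void dpt_unpin(struct vma *dpt);                 /* hypothetical */

static int plane_pin_fb_dpt(struct vma **ggtt_vma, struct vma **dpt_vma)
{
        struct vma *dpt, *fb;

        dpt = dpt_pin();
        if (!dpt)
                return -ENOMEM;
        *ggtt_vma = dpt;        /* stage one: the page table is resident */

        fb = fb_bind_in_dpt(dpt);
        if (!fb) {
                dpt_unpin(dpt); /* roll back stage one on failure */
                *ggtt_vma = NULL;
                return -ENOMEM;
        }
        *dpt_vma = fb;          /* stage two: the fb is mapped via the DPT */

        return 0;
}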
- */ -int -intel_prepare_plane_fb(struct drm_plane *_plane, - struct drm_plane_state *_new_plane_state) -{ - struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY }; - struct intel_plane *plane = to_intel_plane(_plane); - struct intel_plane_state *new_plane_state = - to_intel_plane_state(_new_plane_state); - struct intel_atomic_state *state = - to_intel_atomic_state(new_plane_state->uapi.state); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct intel_plane_state *old_plane_state = - intel_atomic_get_old_plane_state(state, plane); - struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); - struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); - int ret; - - if (old_obj) { - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, - to_intel_crtc(old_plane_state->hw.crtc)); - - /* Big Hammer, we also need to ensure that any pending - * MI_WAIT_FOR_EVENT inside a user batch buffer on the - * current scanout is retired before unpinning the old - * framebuffer. Note that we rely on userspace rendering - * into the buffer attached to the pipe they are waiting - * on. If not, userspace generates a GPU hang with IPEHR - * point to the MI_WAIT_FOR_EVENT. - * - * This should only fail upon a hung GPU, in which case we - * can safely continue. - */ - if (intel_crtc_needs_modeset(crtc_state)) { - ret = i915_sw_fence_await_reservation(&state->commit_ready, - old_obj->base.resv, NULL, - false, 0, - GFP_KERNEL); - if (ret < 0) - return ret; - } - } - - if (new_plane_state->uapi.fence) { /* explicit fencing */ - i915_gem_fence_wait_priority(new_plane_state->uapi.fence, - &attr); - ret = i915_sw_fence_await_dma_fence(&state->commit_ready, - new_plane_state->uapi.fence, - i915_fence_timeout(dev_priv), - GFP_KERNEL); - if (ret < 0) - return ret; - } - - if (!obj) - return 0; - - - ret = intel_plane_pin_fb(new_plane_state); - if (ret) - return ret; - - i915_gem_object_wait_priority(obj, 0, &attr); - i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); - - if (!new_plane_state->uapi.fence) { /* implicit fencing */ - struct dma_fence *fence; - - ret = i915_sw_fence_await_reservation(&state->commit_ready, - obj->base.resv, NULL, - false, - i915_fence_timeout(dev_priv), - GFP_KERNEL); - if (ret < 0) - goto unpin_fb; - - fence = dma_resv_get_excl_unlocked(obj->base.resv); - if (fence) { - add_rps_boost_after_vblank(new_plane_state->hw.crtc, - fence); - dma_fence_put(fence); - } - } else { - add_rps_boost_after_vblank(new_plane_state->hw.crtc, - new_plane_state->uapi.fence); - } - - /* - * We declare pageflips to be interactive and so merit a small bias - * towards upclocking to deliver the frame on time. By only changing - * the RPS thresholds to sample more regularly and aim for higher - * clocks we can hopefully deliver low power workloads (like kodi) - * that are not quite steady state without resorting to forcing - * maximum clocks following a vblank miss (see do_rps_boost()). - */ - if (!state->rps_interactive) { - intel_rps_mark_interactive(&dev_priv->gt.rps, true); - state->rps_interactive = true; - } - - return 0; - -unpin_fb: - intel_plane_unpin_fb(new_plane_state); - - return ret; -} - -/** - * intel_cleanup_plane_fb - Cleans up an fb after plane use - * @plane: drm plane to clean up for - * @_old_plane_state: the state from the previous modeset - * - * Cleans up a framebuffer that has just been removed from a plane. 
- */ -void -intel_cleanup_plane_fb(struct drm_plane *plane, - struct drm_plane_state *_old_plane_state) -{ - struct intel_plane_state *old_plane_state = - to_intel_plane_state(_old_plane_state); - struct intel_atomic_state *state = - to_intel_atomic_state(old_plane_state->uapi.state); - struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb); - - if (!obj) - return; - - if (state->rps_interactive) { - intel_rps_mark_interactive(&dev_priv->gt.rps, false); - state->rps_interactive = false; - } - - /* Should only be called after a successful intel_prepare_plane_fb()! */ - intel_plane_unpin_fb(old_plane_state); -} - /** * intel_plane_destroy - destroy a plane * @plane: plane to destroy @@ -11380,8 +8922,8 @@ static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) struct intel_plane *plane; for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - plane->pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, + plane->pipe); plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); } @@ -11491,6 +9033,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_TC2); intel_ddi_init(dev_priv, PORT_TC3); intel_ddi_init(dev_priv, PORT_TC4); + icl_dsi_init(dev_priv); } else if (IS_ALDERLAKE_S(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_TC1); @@ -11708,249 +9251,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) drm_helper_move_panel_connectors_to_head(&dev_priv->drm); } -static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) -{ - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - - drm_framebuffer_cleanup(fb); - - if (intel_fb_uses_dpt(fb)) - intel_dpt_destroy(intel_fb->dpt_vm); - - intel_frontbuffer_put(intel_fb->frontbuffer); - - kfree(intel_fb); -} - -static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, - struct drm_file *file, - unsigned int *handle) -{ - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct drm_i915_private *i915 = to_i915(obj->base.dev); - - if (i915_gem_object_is_userptr(obj)) { - drm_dbg(&i915->drm, - "attempting to use a userptr for a framebuffer, denied\n"); - return -EINVAL; - } - - return drm_gem_handle_create(file, &obj->base, handle); -} - -static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file, - unsigned flags, unsigned color, - struct drm_clip_rect *clips, - unsigned num_clips) -{ - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - - i915_gem_object_flush_if_display(obj); - intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); - - return 0; -} - -static const struct drm_framebuffer_funcs intel_fb_funcs = { - .destroy = intel_user_framebuffer_destroy, - .create_handle = intel_user_framebuffer_create_handle, - .dirty = intel_user_framebuffer_dirty, -}; - -static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, - struct drm_i915_gem_object *obj, - struct drm_mode_fb_cmd2 *mode_cmd) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct drm_framebuffer *fb = &intel_fb->base; - u32 max_stride; - unsigned int tiling, stride; - int ret = -EINVAL; - int i; - - intel_fb->frontbuffer = intel_frontbuffer_get(obj); - if (!intel_fb->frontbuffer) - return -ENOMEM; - - i915_gem_object_lock(obj, NULL); - tiling = i915_gem_object_get_tiling(obj); - stride = i915_gem_object_get_stride(obj); 
- i915_gem_object_unlock(obj); - - if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { - /* - * If there's a fence, enforce that - * the fb modifier and tiling mode match. - */ - if (tiling != I915_TILING_NONE && - tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { - drm_dbg_kms(&dev_priv->drm, - "tiling_mode doesn't match fb modifier\n"); - goto err; - } - } else { - if (tiling == I915_TILING_X) { - mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; - } else if (tiling == I915_TILING_Y) { - drm_dbg_kms(&dev_priv->drm, - "No Y tiling for legacy addfb\n"); - goto err; - } - } - - if (!drm_any_plane_has_format(&dev_priv->drm, - mode_cmd->pixel_format, - mode_cmd->modifier[0])) { - drm_dbg_kms(&dev_priv->drm, - "unsupported pixel format %p4cc / modifier 0x%llx\n", - &mode_cmd->pixel_format, mode_cmd->modifier[0]); - goto err; - } - - /* - * gen2/3 display engine uses the fence if present, - * so the tiling mode must match the fb modifier exactly. - */ - if (DISPLAY_VER(dev_priv) < 4 && - tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { - drm_dbg_kms(&dev_priv->drm, - "tiling_mode must match fb modifier exactly on gen2/3\n"); - goto err; - } - - max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, - mode_cmd->modifier[0]); - if (mode_cmd->pitches[0] > max_stride) { - drm_dbg_kms(&dev_priv->drm, - "%s pitch (%u) must be at most %d\n", - mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? - "tiled" : "linear", - mode_cmd->pitches[0], max_stride); - goto err; - } - - /* - * If there's a fence, enforce that - * the fb pitch and fence stride match. - */ - if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { - drm_dbg_kms(&dev_priv->drm, - "pitch (%d) must match tiling stride (%d)\n", - mode_cmd->pitches[0], stride); - goto err; - } - - /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ - if (mode_cmd->offsets[0] != 0) { - drm_dbg_kms(&dev_priv->drm, - "plane 0 offset (0x%08x) must be 0\n", - mode_cmd->offsets[0]); - goto err; - } - - drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); - - for (i = 0; i < fb->format->num_planes; i++) { - u32 stride_alignment; - - if (mode_cmd->handles[i] != mode_cmd->handles[0]) { - drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n", - i); - goto err; - } - - stride_alignment = intel_fb_stride_alignment(fb, i); - if (fb->pitches[i] & (stride_alignment - 1)) { - drm_dbg_kms(&dev_priv->drm, - "plane %d pitch (%d) must be at least %u byte aligned\n", - i, fb->pitches[i], stride_alignment); - goto err; - } - - if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) { - int ccs_aux_stride = gen12_ccs_aux_stride(fb, i); - - if (fb->pitches[i] != ccs_aux_stride) { - drm_dbg_kms(&dev_priv->drm, - "ccs aux plane %d pitch (%d) must be %d\n", - i, - fb->pitches[i], ccs_aux_stride); - goto err; - } - } - - /* TODO: Add POT stride remapping support for CCS formats as well. 
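/*
 * [Editor's illustrative aside] The framebuffer validation above leans on
 * two bit tricks: "pitch & (alignment - 1)" is nonzero exactly when the
 * pitch misses a power-of-two alignment, and the Alderlake-P remapping
 * rule checked just below additionally wants the pitch itself to be a
 * power of two. Standalone versions:
 */
#include <stdbool.h>
#include <stdint.h>

static bool pitch_aligned(uint32_t pitch, uint32_t alignment)
{
        /* only valid for power-of-two alignments */
        return (pitch & (alignment - 1)) == 0;
}

static bool pitch_is_pot(uint32_t pitch)
{
        return pitch != 0 && (pitch & (pitch - 1)) == 0;
}
/* pitch_aligned(4096, 64) == true; pitch_is_pot(5120) == false. */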
*/ - if (IS_ALDERLAKE_P(dev_priv) && - mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR && - !intel_fb_needs_pot_stride_remap(intel_fb) && - !is_power_of_2(mode_cmd->pitches[i])) { - drm_dbg_kms(&dev_priv->drm, - "plane %d pitch (%d) must be power of two for tiled buffers\n", - i, mode_cmd->pitches[i]); - goto err; - } - - fb->obj[i] = &obj->base; - } - - ret = intel_fill_fb_info(dev_priv, intel_fb); - if (ret) - goto err; - - if (intel_fb_uses_dpt(fb)) { - struct i915_address_space *vm; - - vm = intel_dpt_create(intel_fb); - if (IS_ERR(vm)) { - ret = PTR_ERR(vm); - goto err; - } - - intel_fb->dpt_vm = vm; - } - - ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); - if (ret) { - drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); - goto err; - } - - return 0; - -err: - intel_frontbuffer_put(intel_fb->frontbuffer); - return ret; -} - -static struct drm_framebuffer * -intel_user_framebuffer_create(struct drm_device *dev, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *user_mode_cmd) -{ - struct drm_framebuffer *fb; - struct drm_i915_gem_object *obj; - struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; - struct drm_i915_private *i915; - - obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); - if (!obj) - return ERR_PTR(-ENOENT); - - /* object is backed with LMEM for discrete */ - i915 = to_i915(obj->base.dev); - if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) { - /* object is "remote", not in local memory */ - i915_gem_object_put(obj); - return ERR_PTR(-EREMOTE); - } - - fb = intel_framebuffer_create(obj, &mode_cmd); - i915_gem_object_put(obj); - - return fb; -} - static enum drm_mode_status intel_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode) @@ -12039,6 +9339,14 @@ intel_mode_valid(struct drm_device *dev, return MODE_V_ILLEGAL; } + /* + * Cantiga+ cannot handle modes with a hsync front porch of 0. + * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
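/*
 * [Editor's illustrative aside] In DRM mode timings the horizontal front
 * porch is hsync_start - hdisplay, so the workaround this comment
 * introduces (the check completes just below) rejects exactly the modes
 * whose front porch is zero. Sketch of the timing relationship:
 */
#include <stdbool.h>

struct h_timings {
        int hdisplay;           /* active pixels */
        int hsync_start;        /* front porch ends, sync begins */
        int hsync_end;
        int htotal;
};

static bool zero_hsync_front_porch(const struct h_timings *t)
{
        return t->hsync_start - t->hdisplay == 0;
}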
+ */ + if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) && + mode->hsync_start == mode->hdisplay) + return MODE_H_ILLEGAL; + return MODE_OK; } @@ -12080,7 +9388,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, - .get_format_info = intel_get_format_info, + .get_format_info = intel_fb_get_format_info, .output_poll_changed = intel_fbdev_output_poll_changed, .mode_valid = intel_mode_valid, .atomic_check = intel_atomic_check, @@ -12090,6 +9398,46 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .atomic_state_free = intel_atomic_state_free, }; +static const struct drm_i915_display_funcs skl_display_funcs = { + .get_pipe_config = hsw_get_pipe_config, + .crtc_enable = hsw_crtc_enable, + .crtc_disable = hsw_crtc_disable, + .commit_modeset_enables = skl_commit_modeset_enables, + .get_initial_plane_config = skl_get_initial_plane_config, +}; + +static const struct drm_i915_display_funcs ddi_display_funcs = { + .get_pipe_config = hsw_get_pipe_config, + .crtc_enable = hsw_crtc_enable, + .crtc_disable = hsw_crtc_disable, + .commit_modeset_enables = intel_commit_modeset_enables, + .get_initial_plane_config = i9xx_get_initial_plane_config, +}; + +static const struct drm_i915_display_funcs pch_split_display_funcs = { + .get_pipe_config = ilk_get_pipe_config, + .crtc_enable = ilk_crtc_enable, + .crtc_disable = ilk_crtc_disable, + .commit_modeset_enables = intel_commit_modeset_enables, + .get_initial_plane_config = i9xx_get_initial_plane_config, +}; + +static const struct drm_i915_display_funcs vlv_display_funcs = { + .get_pipe_config = i9xx_get_pipe_config, + .crtc_enable = valleyview_crtc_enable, + .crtc_disable = i9xx_crtc_disable, + .commit_modeset_enables = intel_commit_modeset_enables, + .get_initial_plane_config = i9xx_get_initial_plane_config, +}; + +static const struct drm_i915_display_funcs i9xx_display_funcs = { + .get_pipe_config = i9xx_get_pipe_config, + .crtc_enable = i9xx_crtc_enable, + .crtc_disable = i9xx_crtc_disable, + .commit_modeset_enables = intel_commit_modeset_enables, + .get_initial_plane_config = i9xx_get_initial_plane_config, +}; + /** * intel_init_display_hooks - initialize the display modesetting hooks * @dev_priv: device private @@ -12100,43 +9448,24 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) return; intel_init_cdclk_hooks(dev_priv); - intel_init_audio_hooks(dev_priv); + intel_audio_hooks_init(dev_priv); intel_dpll_init_clock_hook(dev_priv); if (DISPLAY_VER(dev_priv) >= 9) { - dev_priv->display.get_pipe_config = hsw_get_pipe_config; - dev_priv->display.crtc_enable = hsw_crtc_enable; - dev_priv->display.crtc_disable = hsw_crtc_disable; + dev_priv->display = &skl_display_funcs; } else if (HAS_DDI(dev_priv)) { - dev_priv->display.get_pipe_config = hsw_get_pipe_config; - dev_priv->display.crtc_enable = hsw_crtc_enable; - dev_priv->display.crtc_disable = hsw_crtc_disable; + dev_priv->display = &ddi_display_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->display.get_pipe_config = ilk_get_pipe_config; - dev_priv->display.crtc_enable = ilk_crtc_enable; - dev_priv->display.crtc_disable = ilk_crtc_disable; + dev_priv->display = &pch_split_display_funcs; } else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.get_pipe_config = i9xx_get_pipe_config; - dev_priv->display.crtc_enable = valleyview_crtc_enable; - dev_priv->display.crtc_disable = i9xx_crtc_disable; + dev_priv->display = 
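/*
 * [Editor's illustrative aside] The patch replaces per-field assignment
 * of display hooks with a pointer to one of several static const vtables,
 * selected once per platform below -- const tables can be placed in
 * read-only memory and cannot be corrupted at runtime. Minimal sketch of
 * the pattern with invented names:
 */
struct disp_funcs {
        int (*get_pipe_config)(void *crtc, void *state);
        void (*crtc_enable)(void *state, void *crtc);
};

static int newgen_get_pipe_config(void *crtc, void *state) { return 0; }
static void newgen_crtc_enable(void *state, void *crtc) { }

static const struct disp_funcs newgen_funcs = {
        .get_pipe_config = newgen_get_pipe_config,
        .crtc_enable = newgen_crtc_enable,
};

struct dev_state {
        const struct disp_funcs *display;
        int display_ver;
};

static void init_hooks(struct dev_state *dev)
{
        if (dev->display_ver >= 9)
                dev->display = &newgen_funcs;
        /* ... one const table per platform family, as in the hunk ... */
}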
&vlv_display_funcs; } else { - dev_priv->display.get_pipe_config = i9xx_get_pipe_config; - dev_priv->display.crtc_enable = i9xx_crtc_enable; - dev_priv->display.crtc_disable = i9xx_crtc_disable; + dev_priv->display = &i9xx_display_funcs; } intel_fdi_init_hook(dev_priv); - - if (DISPLAY_VER(dev_priv) >= 9) { - dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; - dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config; - } else { - dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; - dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; - } - } void intel_modeset_init_hw(struct drm_i915_private *i915) @@ -12206,7 +9535,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv) int i; /* Only supported on platforms that use atomic watermark design */ - if (!dev_priv->display.optimize_watermarks) + if (!dev_priv->wm_disp->optimize_watermarks) return; state = drm_atomic_state_alloc(&dev_priv->drm); @@ -12239,7 +9568,7 @@ retry: /* Write calculated watermark values back */ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { crtc_state->wm.need_postvbl_update = true; - dev_priv->display.optimize_watermarks(intel_state, crtc); + intel_optimize_watermarks(intel_state, crtc); to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; } @@ -12271,22 +9600,6 @@ fail: drm_modeset_acquire_fini(&ctx); } -static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) -{ - if (IS_IRONLAKE(dev_priv)) { - u32 fdi_pll_clk = - intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; - - dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; - } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) { - dev_priv->fdi_pll_freq = 270000; - } else { - return; - } - - drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); -} - static int intel_initial_commit(struct drm_device *dev) { struct drm_atomic_state *state = NULL; @@ -12381,7 +9694,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915) mode_config->funcs = &intel_mode_funcs; - mode_config->async_page_flip = has_async_flips(i915); + mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915); /* * Maximum framebuffer dimensions, chosen to match @@ -12420,22 +9733,6 @@ static void intel_mode_config_cleanup(struct drm_i915_private *i915) drm_mode_config_cleanup(&i915->drm); } -static void plane_config_fini(struct intel_initial_plane_config *plane_config) -{ - if (plane_config->fb) { - struct drm_framebuffer *fb = &plane_config->fb->base; - - /* We may only have the stub and not a full framebuffer */ - if (drm_framebuffer_read_refcount(fb)) - drm_framebuffer_put(fb); - else - kfree(fb); - } - - if (plane_config->vma) - i915_vma_put(plane_config->vma); -} - /* part #1: call before irq install */ int intel_modeset_init_noirq(struct drm_i915_private *i915) { @@ -12540,7 +9837,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) intel_plane_possible_crtcs_init(i915); intel_shared_dpll_init(dev); - intel_update_fdi_pll_freq(i915); + intel_fdi_pll_freq_update(i915); intel_update_czclk(i915); intel_modeset_init_hw(i915); @@ -12564,30 +9861,13 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); + intel_acpi_assign_connector_fwnodes(i915); drm_modeset_unlock_all(dev); for_each_intel_crtc(dev, crtc) { - struct intel_initial_plane_config plane_config = {}; - if 
(!to_intel_crtc_state(crtc->base.state)->uapi.active) continue; - - /* - * Note that reserving the BIOS fb up front prevents us - * from stuffing other stolen allocations like the ring - * on top. This prevents some ugliness at boot time, and - * can even allow for smooth boot transitions if the BIOS - * fb is large enough for the active pipe configuration. - */ - i915->display.get_initial_plane_config(crtc, &plane_config); - - /* - * If the fb is shared between multiple heads, we'll - * just get the first one. - */ - intel_find_initial_plane_obj(crtc, &plane_config); - - plane_config_fini(&plane_config); + intel_crtc_initial_plane_config(crtc); } /* @@ -12636,7 +9916,7 @@ int intel_modeset_init(struct drm_i915_private *i915) void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { .m1 = 18, @@ -12709,7 +9989,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", pipe_name(pipe)); @@ -12761,7 +10041,7 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", plane->base.base.id, plane->base.name); - plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + plane_crtc = intel_crtc_for_pipe(dev_priv, pipe); intel_plane_disable_noatomic(plane_crtc, plane); } } @@ -12869,13 +10149,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, intel_plane_disable_noatomic(crtc, plane); } - /* - * Disable any background color set by the BIOS, but enable the - * gamma and CSC to match how we program our planes. - */ - if (DISPLAY_VER(dev_priv) >= 9) - intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe), - SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE); + /* Disable any background color/etc. 
set by the BIOS */ + intel_color_commit(crtc_state); } /* Adjust the state of the output pipe according to whether we @@ -13019,7 +10294,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) visible = plane->get_hw_state(plane, &pipe); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); intel_set_plane_visible(crtc_state, plane_state, visible); @@ -13076,24 +10351,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) enableddisabled(crtc_state->hw.active)); } - dev_priv->active_pipes = cdclk_state->active_pipes = - dbuf_state->active_pipes = active_pipes; + cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes; readout_plane_state(dev_priv); for_each_intel_encoder(dev, encoder) { + struct intel_crtc_state *crtc_state = NULL; + pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { - struct intel_crtc_state *crtc_state; - - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); encoder->base.crtc = &crtc->base; intel_encoder_get_config(encoder, crtc_state); - if (encoder->sync_state) - encoder->sync_state(encoder, crtc_state); /* read out to slave crtc as well for bigjoiner */ if (crtc_state->bigjoiner) { @@ -13108,6 +10380,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) encoder->base.crtc = NULL; } + if (encoder->sync_state) + encoder->sync_state(encoder, crtc_state); + drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", encoder->base.base.id, encoder->base.name, @@ -13161,9 +10436,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_plane *plane; int min_cdclk = 0; - if (crtc_state->bigjoiner_slave) - continue; - if (crtc_state->hw.active) { /* * The initial mode needs to be set in order to keep @@ -13223,39 +10495,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_bw_crtc_update(bw_state, crtc_state); intel_pipe_config_sanity_check(dev_priv, crtc_state); - - /* discard our incomplete slave state, copy it from master */ - if (crtc_state->bigjoiner && crtc_state->hw.active) { - struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc; - struct intel_crtc_state *slave_crtc_state = - to_intel_crtc_state(slave->base.state); - - copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state); - slave->base.mode = crtc->base.mode; - - cdclk_state->min_cdclk[slave->pipe] = min_cdclk; - cdclk_state->min_voltage_level[slave->pipe] = - crtc_state->min_voltage_level; - - for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) { - const struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - - /* - * FIXME don't have the fb yet, so can't - * use intel_plane_data_rate() :( - */ - if (plane_state->uapi.visible) - crtc_state->data_rate[plane->id] = - 4 * crtc_state->pixel_rate; - else - crtc_state->data_rate[plane->id] = 0; - } - - intel_bw_crtc_update(bw_state, slave_crtc_state); - drm_calc_timestamping_constants(&slave->base, - &slave_crtc_state->hw.adjusted_mode); - } } } @@ -13390,17 +10629,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. 
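 * i.e. bring it into a state the driver could have programmed itself,
 * disabling anything left behind by the BIOS that does not fit.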
*/ - - /* Sanitize the TypeC port mode upfront, encoders depend on this */ - for_each_intel_encoder(dev, encoder) { - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - - /* We need to sanitize only the MST primary port. */ - if (encoder->type != INTEL_OUTPUT_DP_MST && - intel_phy_is_tc(dev_priv, phy)) - intel_tc_port_sanitize(enc_to_dig_port(encoder)); - } - get_encoder_power_domains(dev_priv); if (HAS_PCH_IBX(dev_priv)) @@ -13571,7 +10799,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) destroy_workqueue(i915->flip_wq); destroy_workqueue(i915->modeset_wq); - intel_fbc_cleanup_cfb(i915); + intel_fbc_cleanup(i915); } /* part #3: call after gem init */ @@ -13586,6 +10814,27 @@ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) intel_bios_driver_remove(i915); } +bool intel_modeset_probe_defer(struct pci_dev *pdev) +{ + struct drm_privacy_screen *privacy_screen; + + /* + * apple-gmux is needed on dual GPU MacBook Pro + * to probe the panel if we're the inactive GPU. + */ + if (vga_switcheroo_client_probe_defer(pdev)) + return true; + + /* If the LCD panel has a privacy-screen, wait for it */ + privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL); + if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER) + return true; + + drm_privacy_screen_put(privacy_screen); + + return false; +} + void intel_display_driver_register(struct drm_i915_private *i915) { if (!HAS_DISPLAY(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 284936f0ddab..b61b75248ded 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -57,6 +57,7 @@ struct intel_plane; struct intel_plane_state; struct intel_remapped_info; struct intel_rotation_info; +struct pci_dev; enum i915_gpio { GPIOA, @@ -270,6 +271,7 @@ enum tc_port { }; enum tc_port_mode { + TC_PORT_DISCONNECTED, TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY, @@ -345,9 +347,33 @@ enum phy_fia { FIA3, }; +enum hpd_pin { + HPD_NONE = 0, + HPD_TV = HPD_NONE, /* TV is known to be unreliable */ + HPD_CRT, + HPD_SDVO_B, + HPD_SDVO_C, + HPD_PORT_A, + HPD_PORT_B, + HPD_PORT_C, + HPD_PORT_D, + HPD_PORT_E, + HPD_PORT_TC1, + HPD_PORT_TC2, + HPD_PORT_TC3, + HPD_PORT_TC4, + HPD_PORT_TC5, + HPD_PORT_TC6, + + HPD_NUM_PINS +}; + +#define for_each_hpd_pin(__pin) \ + for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) + #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ - for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p)) + for_each_if(INTEL_INFO(__dev_priv)->display.pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ for_each_pipe(__dev_priv, __p) \ @@ -355,7 +381,7 @@ enum phy_fia { #define for_each_cpu_transcoder(__dev_priv, __t) \ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \ - for_each_if (INTEL_INFO(__dev_priv)->cpu_transcoder_mask & BIT(__t)) + for_each_if (INTEL_INFO(__dev_priv)->display.cpu_transcoder_mask & BIT(__t)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ for_each_cpu_transcoder(__dev_priv, __t) \ @@ -520,7 +546,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, bool constant_n, bool fec_enable); -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); enum drm_mode_status @@ -531,8 +556,8 @@ 
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); void intel_plane_destroy(struct drm_plane *plane); -void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state); -void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state); +void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state); +void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state); void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); @@ -541,15 +566,10 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, const char *name, u32 reg); -void lpt_pch_enable(const struct intel_crtc_state *crtc_state); -void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv); -void lpt_disable_iclkip(struct drm_i915_private *dev_priv); void intel_init_display_hooks(struct drm_i915_private *dev_priv); unsigned int intel_fb_xy_to_linear(int x, int y, const struct intel_plane_state *state, int plane); -unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, - int color_plane, unsigned int height); void intel_add_fb_offsets(int *x, int *y, const struct intel_plane_state *state, int plane); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); @@ -577,24 +597,10 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); -struct i915_vma * -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, bool phys_cursor, - const struct i915_ggtt_view *view, - bool uses_fence, - unsigned long *out_flags); -void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags); struct drm_framebuffer * intel_framebuffer_create(struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); -int intel_prepare_plane_fb(struct drm_plane *plane, - struct drm_plane_state *new_state); -void intel_cleanup_plane_fb(struct drm_plane *plane, - struct drm_plane_state *old_state); -void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe); - -int lpt_get_iclkip(struct drm_i915_private *dev_priv); bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); @@ -603,8 +609,11 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n); +void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); - bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); void hsw_enable_ips(const struct intel_crtc_state *crtc_state); void hsw_disable_ips(const struct intel_crtc_state *crtc_state); @@ -620,24 +629,19 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); -bool 
-intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, - u64 modifier); +bool intel_plane_uses_fence(const struct intel_plane_state *plane_state); -int intel_plane_pin_fb(struct intel_plane_state *plane_state); -void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state); struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state); - -unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, - int color_plane); -unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane); +void intel_plane_disable_noatomic(struct intel_crtc *crtc, + struct intel_plane *plane); void intel_display_driver_register(struct drm_i915_private *i915); void intel_display_driver_unregister(struct drm_i915_private *i915); /* modesetting */ +bool intel_modeset_probe_defer(struct pci_dev *pdev); void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init_noirq(struct drm_i915_private *i915); int intel_modeset_init_nogem(struct drm_i915_private *i915); @@ -646,27 +650,13 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915); void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915); void intel_display_resume(struct drm_device *dev); -void intel_init_pch_refclk(struct drm_i915_private *dev_priv); int intel_modeset_all_pipes(struct intel_atomic_state *state); /* modesetting asserts */ -void assert_panel_unlocked(struct drm_i915_private *dev_priv, - enum pipe pipe); -void assert_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state); -#define assert_pll_enabled(d, p) assert_pll(d, p, true) -#define assert_pll_disabled(d, p) assert_pll(d, p, false) -void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state); -#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) -#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) -void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state); -#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) -#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) -void assert_pipe(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder, bool state); -#define assert_pipe_enabled(d, t) assert_pipe(d, t, true) -#define assert_pipe_disabled(d, t) assert_pipe(d, t, false) +void assert_transcoder(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, bool state); +#define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true) +#define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false) /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 8fdacb252bb1..572445299b04 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -7,18 +7,19 @@ #include <drm/drm_fourcc.h> #include "i915_debugfs.h" +#include "intel_de.h" #include "intel_display_debugfs.h" #include "intel_display_power.h" -#include "intel_de.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" +#include "intel_dp_mst.h" +#include "intel_drrs.h" #include "intel_fbc.h" #include "intel_hdcp.h" #include "intel_hdmi.h" #include "intel_pm.h" #include 
"intel_psr.h" -#include "intel_sideband.h" #include "intel_sprite.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) @@ -39,83 +40,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) return 0; } -static int i915_fbc_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_fbc *fbc = &dev_priv->fbc; - intel_wakeref_t wakeref; - - if (!HAS_FBC(dev_priv)) - return -ENODEV; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&fbc->lock); - - if (intel_fbc_is_active(dev_priv)) - seq_puts(m, "FBC enabled\n"); - else - seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); - - if (intel_fbc_is_active(dev_priv)) { - u32 mask; - - if (DISPLAY_VER(dev_priv) >= 8) - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; - else if (DISPLAY_VER(dev_priv) >= 7) - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; - else if (DISPLAY_VER(dev_priv) >= 5) - mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; - else if (IS_G4X(dev_priv)) - mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK; - else - mask = intel_de_read(dev_priv, FBC_STATUS) & - (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); - - seq_printf(m, "Compressing: %s\n", yesno(mask)); - } - - mutex_unlock(&fbc->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_fbc_false_color_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - - *val = dev_priv->fbc.false_color; - - return 0; -} - -static int i915_fbc_false_color_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - u32 reg; - - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - - mutex_lock(&dev_priv->fbc.lock); - - reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL); - dev_priv->fbc.false_color = val; - - intel_de_write(dev_priv, ILK_DPFC_CONTROL, - val ? 
(reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR)); - - mutex_unlock(&dev_priv->fbc.lock); - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, - i915_fbc_false_color_get, i915_fbc_false_color_set, - "%llu\n"); - static int i915_ips_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -302,8 +226,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) }; val = intel_de_read(dev_priv, EDP_PSR2_STATUS(intel_dp->psr.transcoder)); - status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> - EDP_PSR2_STATUS_STATE_SHIFT; + status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; } else { @@ -502,28 +425,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, static int i915_power_domain_info(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - int i; - - mutex_lock(&power_domains->lock); - - seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); - for (i = 0; i < power_domains->power_well_count; i++) { - struct i915_power_well *power_well; - enum intel_display_power_domain power_domain; - - power_well = &power_domains->power_wells[i]; - seq_printf(m, "%-25s %d\n", power_well->desc->name, - power_well->count); - - for_each_power_domain(power_domain, power_well->desc->domains) - seq_printf(m, " %-23s %d\n", - intel_display_power_domain_str(power_domain), - power_domains->domain_use_count[power_domain]); - } + struct drm_i915_private *i915 = node_to_i915(m->private); - mutex_unlock(&power_domains->lock); + intel_display_power_debug(i915, m); return 0; } @@ -1323,9 +1227,6 @@ static int i915_drrs_status(struct seq_file *m, void *unused) return 0; } -#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \ - seq_puts(m, "LPSP: disabled\n")) - static bool intel_lpsp_power_well_enabled(struct drm_i915_private *i915, enum i915_power_well_id power_well_id) @@ -1344,32 +1245,20 @@ intel_lpsp_power_well_enabled(struct drm_i915_private *i915, static int i915_lpsp_status(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); - - if (DISPLAY_VER(i915) >= 13) { - LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, - SKL_DISP_PW_2)); + bool lpsp_enabled = false; + + if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) { + lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2); + } else if (IS_DISPLAY_VER(i915, 11, 12)) { + lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3); + } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { + lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL); + } else { + seq_puts(m, "LPSP: not supported\n"); return 0; } - switch (DISPLAY_VER(i915)) { - case 12: - case 11: - LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3)); - break; - case 10: - case 9: - LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2)); - break; - default: - /* - * Apart from HASWELL/BROADWELL other legacy platform doesn't - * support lpsp. 
- */ - if (IS_HASWELL(i915) || IS_BROADWELL(i915)) - LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL)); - else - seq_puts(m, "LPSP: not supported\n"); - } + seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled)); return 0; } @@ -1393,7 +1282,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) continue; dig_port = enc_to_dig_port(intel_encoder); - if (!dig_port->dp.can_mst) + if (!intel_dp_mst_source_support(&dig_port->dp)) continue; seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", @@ -2044,11 +1933,9 @@ static int i915_drrs_ctl_set(void *data, u64 val) intel_dp = enc_to_intel_dp(encoder); if (val) - intel_edp_drrs_enable(intel_dp, - crtc_state); + intel_drrs_enable(intel_dp, crtc_state); else - intel_edp_drrs_disable(intel_dp, - crtc_state); + intel_drrs_disable(intel_dp, crtc_state); } drm_connector_list_iter_end(&conn_iter); @@ -2111,9 +1998,7 @@ i915_fifo_underrun_reset_write(struct file *filp, return ret; } - ret = intel_fbc_reset_underrun(dev_priv); - if (ret) - return ret; + intel_fbc_reset_underrun(dev_priv); return cnt; } @@ -2127,7 +2012,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { static const struct drm_info_list intel_display_debugfs_list[] = { {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, - {"i915_fbc_status", i915_fbc_status, 0}, {"i915_ips_status", i915_ips_status, 0}, {"i915_sr_status", i915_sr_status, 0}, {"i915_opregion", i915_opregion, 0}, @@ -2152,7 +2036,6 @@ static const struct { {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, - {"i915_fbc_false_color", &i915_fbc_false_color_fops}, {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", &i915_displayport_test_active_fops}, @@ -2179,6 +2062,8 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) drm_debugfs_create_files(intel_display_debugfs_list, ARRAY_SIZE(intel_display_debugfs_list), minor->debugfs_root, minor); + + intel_fbc_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) @@ -2240,14 +2125,12 @@ static int i915_psr_status_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_psr_status); -#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \ - seq_puts(m, "LPSP: incapable\n")) - static int i915_lpsp_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_encoder *encoder; + bool lpsp_capable = false; encoder = intel_attached_encoder(to_intel_connector(connector)); if (!encoder) @@ -2256,35 +2139,27 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data) if (connector->status != connector_status_connected) return -ENODEV; - if (DISPLAY_VER(i915) >= 13) { - LPSP_CAPABLE(encoder->port <= PORT_B); - return 0; - } - - switch (DISPLAY_VER(i915)) { - case 12: + if (DISPLAY_VER(i915) >= 13) + lpsp_capable = encoder->port <= PORT_B; + else if (DISPLAY_VER(i915) >= 12) /* * Actually TGL can drive LPSP on port till DDI_C * but there is no physically connected DDI_C on TGL SKUs, * even the driver is not initializing the DDI_C port for gen12. */
- LPSP_CAPABLE(encoder->port <= PORT_B); - break; - case 11: - LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI || - connector->connector_type == DRM_MODE_CONNECTOR_eDP); - break; - case 10: - case 9: - LPSP_CAPABLE(encoder->port == PORT_A && - (connector->connector_type == DRM_MODE_CONNECTOR_DSI || - connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)); - break; - default: - if (IS_HASWELL(i915) || IS_BROADWELL(i915)) - LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP); - } + lpsp_capable = encoder->port <= PORT_B; + else if (DISPLAY_VER(i915) == 11) + lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI || + connector->connector_type == DRM_MODE_CONNECTOR_eDP); + else if (IS_DISPLAY_VER(i915, 9, 10)) + lpsp_capable = (encoder->port == PORT_A && + (connector->connector_type == DRM_MODE_CONNECTOR_DSI || + connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)); + else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP; + + seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable"); return 0; } @@ -2468,17 +2343,16 @@ static const struct file_operations i915_dsc_bpp_fops = { * * Cleanup will be done by drm_connector_unregister() through a call to * drm_debugfs_connector_remove(). - * - * Returns 0 on success, negative error codes on error. */ -int intel_connector_debugfs_add(struct drm_connector *connector) +void intel_connector_debugfs_add(struct intel_connector *intel_connector) { + struct drm_connector *connector = &intel_connector->base; struct dentry *root = connector->debugfs_entry; struct drm_i915_private *dev_priv = to_i915(connector->dev); /* The connector must have been registered beforehand. */ if (!root) - return -ENODEV; + return; if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { debugfs_create_file("i915_panel_timings", S_IRUGO, root, @@ -2511,33 +2385,23 @@ int intel_connector_debugfs_add(struct drm_connector *connector) connector, &i915_dsc_bpp_fops); } - /* Legacy panels doesn't lpsp on any platform */ - if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv)) && - (connector->connector_type == DRM_MODE_CONNECTOR_DSI || - connector->connector_type == DRM_MODE_CONNECTOR_eDP || - connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || - connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || - connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) + if (connector->connector_type == DRM_MODE_CONNECTOR_DSI || + connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) debugfs_create_file("i915_lpsp_capability", 0444, root, connector, &i915_lpsp_capability_fops); - - return 0; } /** * intel_crtc_debugfs_add - add i915 specific crtc debugfs files * @crtc: pointer to a drm_crtc * - * Returns 0 on success, negative error codes on error. - * - * Failure to add debugfs entries should generally be ignored. 
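 * That is why the function returns nothing: the debugfs files are
 * strictly best-effort and callers have no sensible way to react.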
*/ -int intel_crtc_debugfs_add(struct drm_crtc *crtc) +void intel_crtc_debugfs_add(struct drm_crtc *crtc) { - if (!crtc->debugfs_entry) - return -ENODEV; - - crtc_updates_add(crtc); - return 0; + if (crtc->debugfs_entry) + crtc_updates_add(crtc); } diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h index 557901f3eb90..d3a79c07c384 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h @@ -6,18 +6,18 @@ #ifndef __INTEL_DISPLAY_DEBUGFS_H__ #define __INTEL_DISPLAY_DEBUGFS_H__ -struct drm_connector; struct drm_crtc; struct drm_i915_private; +struct intel_connector; #ifdef CONFIG_DEBUG_FS void intel_display_debugfs_register(struct drm_i915_private *i915); -int intel_connector_debugfs_add(struct drm_connector *connector); -int intel_crtc_debugfs_add(struct drm_crtc *crtc); +void intel_connector_debugfs_add(struct intel_connector *connector); +void intel_crtc_debugfs_add(struct drm_crtc *crtc); #else static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {} -static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; } -static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; } +static inline void intel_connector_debugfs_add(struct intel_connector *connector) {} +static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {} #endif #endif /* __INTEL_DISPLAY_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index cce1a926fcc1..05babdcf5f2e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -3,24 +3,118 @@ * Copyright © 2019 Intel Corporation */ -#include "display/intel_crt.h" - #include "i915_drv.h" #include "i915_irq.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" -#include "intel_display_power.h" +#include "intel_crt.h" #include "intel_de.h" +#include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dpio_phy.h" +#include "intel_dpll.h" #include "intel_hotplug.h" +#include "intel_pch_refclk.h" +#include "intel_pcode.h" #include "intel_pm.h" #include "intel_pps.h" -#include "intel_sideband.h" #include "intel_snps_phy.h" #include "intel_tc.h" #include "intel_vga.h" +#include "vlv_sideband.h" + +struct i915_power_well_ops { + /* + * Synchronize the well's hw state to match the current sw state, for + * example enable/disable it based on the current refcount. Called + * during driver init and resume time, possibly after first calling + * the enable/disable handlers. + */ + void (*sync_hw)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Enable the well and resources that depend on it (for example + * interrupts located on the well). Called after the 0->1 refcount + * transition. + */ + void (*enable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Disable the well and resources that depend on it. Called after + * the 1->0 refcount transition. + */ + void (*disable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* Returns the hw enabled state. 
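+ * This is the state as read back from the hardware registers, as
+ * opposed to the hw_enabled flag cached in struct i915_power_well.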
*/ + bool (*is_enabled)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); +}; + +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + +/* Power well structure for haswell */ +struct i915_power_well_desc { + const char *name; + bool always_on; + u64 domains; + /* unique identifier for this power well */ + enum i915_power_well_id id; + /* + * Arbitrary data associated with this power well. Platform and power + * well specific. + */ + union { + struct { + /* + * request/status flag index in the PUNIT power well + * control/status registers. + */ + u8 idx; + } vlv; + struct { + enum dpio_phy phy; + } bxt; + struct { + const struct i915_power_well_regs *regs; + /* + * request/status flag index in the power well + * control/status registers. + */ + u8 idx; + /* Mask of pipes whose IRQ logic is backed by the pw */ + u8 irq_pipe_mask; + /* + * Instead of waiting for the status bit to ack enables, + * just wait a specific amount of time and then consider + * the well enabled. + */ + u16 fixed_enable_delay; + /* The pw is backing the VGA functionality */ + bool has_vga:1; + bool has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. + */ + bool is_tc_tbt:1; + } hsw; + }; + const struct i915_power_well_ops *ops; +}; + +struct i915_power_well { + const struct i915_power_well_desc *desc; + /* power well enable/disable usage count */ + int count; + /* cached hw enabled state */ + bool hw_enabled; +}; bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -153,8 +247,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "MODESET"; case POWER_DOMAIN_GT_IRQ: return "GT_IRQ"; - case POWER_DOMAIN_DPLL_DC_OFF: - return "DPLL_DC_OFF"; + case POWER_DOMAIN_DC_OFF: + return "DC_OFF"; case POWER_DOMAIN_TC_COLD_OFF: return "TC_COLD_OFF"; default: @@ -433,6 +527,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : SKL_PW_CTL_IDX_TO_PG(pw_idx); + + /* Wa_16013190616:adlp */ + if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); + /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse @@ -560,7 +659,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, if (drm_WARN_ON(&dev_priv->drm, !dig_port)) return; - if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port) + if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) return; drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); @@ -629,7 +728,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, * exit sequence. 
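 * In those cases the missing ack is benign, which is what the
 * timeout_expected flag below conveys to the enable-wait.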
*/ timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port); - if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port) + if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port)) icl_tc_cold_exit(dev_priv); hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); @@ -893,7 +992,7 @@ static u32 sanitize_target_dc_state(struct drm_i915_private *dev_priv, u32 target_dc_state) { - u32 states[] = { + static const u32 states[] = { DC_STATE_EN_UPTO_DC6, DC_STATE_EN_UPTO_DC5, DC_STATE_EN_DC3CO, @@ -1195,7 +1294,7 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - dev_priv->display.get_cdclk(dev_priv, &cdclk_config); + intel_cdclk_get_cdclk(dev_priv, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ drm_WARN_ON(&dev_priv->drm, intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, @@ -2801,7 +2900,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, ICL_PW_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ + BIT_ULL(POWER_DOMAIN_DC_OFF) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define ICL_DDI_IO_A_POWER_DOMAINS ( \ @@ -3104,6 +3203,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) @@ -5270,7 +5370,7 @@ static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) static void icl_mbus_init(struct drm_i915_private *dev_priv) { - unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; + unsigned long abox_regs = INTEL_INFO(dev_priv)->display.abox_mask; u32 mask, val, i; if (IS_ALDERLAKE_P(dev_priv)) @@ -5730,7 +5830,7 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) enum intel_dram_type type = dev_priv->dram_info.type; u8 num_channels = dev_priv->dram_info.num_channels; const struct buddy_page_mask *table; - unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask; + unsigned long abox_mask = INTEL_INFO(dev_priv)->display.abox_mask; int config, i; /* BW_BUDDY registers are not used on dgpu's beyond DG1 */ @@ -6389,3 +6489,28 @@ void intel_display_power_resume(struct drm_i915_private *i915) hsw_disable_pc8(i915); } } + +void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + int i; + + mutex_lock(&power_domains->lock); + + seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); + for (i = 0; i < power_domains->power_well_count; i++) { + struct i915_power_well *power_well; + enum intel_display_power_domain power_domain; + + power_well = &power_domains->power_wells[i]; + seq_printf(m, "%-25s %d\n", power_well->desc->name, + power_well->count); + + for_each_power_domain(power_domain, power_well->desc->domains) + seq_printf(m, " %-23s %d\n", + intel_display_power_domain_str(power_domain), + power_domains->domain_use_count[power_domain]); + } + + mutex_unlock(&power_domains->lock); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 978531841fa3..686d18eaa24c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -6,11 +6,13 @@ #ifndef 
__INTEL_DISPLAY_POWER_H__ #define __INTEL_DISPLAY_POWER_H__ -#include "intel_display.h" #include "intel_runtime_pm.h" #include "i915_reg.h" +enum dpio_channel; +enum dpio_phy; struct drm_i915_private; +struct i915_power_well; struct intel_encoder; enum intel_display_power_domain { @@ -117,7 +119,7 @@ enum intel_display_power_domain { POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, - POWER_DOMAIN_DPLL_DC_OFF, + POWER_DOMAIN_DC_OFF, POWER_DOMAIN_TC_COLD_OFF, POWER_DOMAIN_INIT, @@ -155,100 +157,6 @@ enum i915_power_well_id { ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ (tran) + POWER_DOMAIN_TRANSCODER_A) -struct i915_power_well; - -struct i915_power_well_ops { - /* - * Synchronize the well's hw state to match the current sw state, for - * example enable/disable it based on the current refcount. Called - * during driver init and resume time, possibly after first calling - * the enable/disable handlers. - */ - void (*sync_hw)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Enable the well and resources that depend on it (for example - * interrupts located on the well). Called after the 0->1 refcount - * transition. - */ - void (*enable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Disable the well and resources that depend on it. Called after - * the 1->0 refcount transition. - */ - void (*disable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* Returns the hw enabled state. */ - bool (*is_enabled)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); -}; - -struct i915_power_well_regs { - i915_reg_t bios; - i915_reg_t driver; - i915_reg_t kvmr; - i915_reg_t debug; -}; - -/* Power well structure for haswell */ -struct i915_power_well_desc { - const char *name; - bool always_on; - u64 domains; - /* unique identifier for this power well */ - enum i915_power_well_id id; - /* - * Arbitraty data associated with this power well. Platform and power - * well specific. - */ - union { - struct { - /* - * request/status flag index in the PUNIT power well - * control/status registers. - */ - u8 idx; - } vlv; - struct { - enum dpio_phy phy; - } bxt; - struct { - const struct i915_power_well_regs *regs; - /* - * request/status flag index in the power well - * constrol/status registers. - */ - u8 idx; - /* Mask of pipes whose IRQ logic is backed by the pw */ - u8 irq_pipe_mask; - /* - * Instead of waiting for the status bit to ack enables, - * just wait a specific amount of time and then consider - * the well enabled. - */ - u16 fixed_enable_delay; - /* The pw is backing the VGA functionality */ - bool has_vga:1; - bool has_fuses:1; - /* - * The pw is for an ICL+ TypeC PHY port in - * Thunderbolt mode. - */ - bool is_tc_tbt:1; - } hsw; - }; - const struct i915_power_well_ops *ops; -}; - -struct i915_power_well { - const struct i915_power_well_desc *desc; - /* power well enable/disable usage count */ - int count; - /* cached hw enabled state */ - bool hw_enabled; -}; - struct i915_power_domains { /* * Power wells needed for initialization at driver init and suspend @@ -391,6 +299,8 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915, intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); } +void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m); + /* * FIXME: We should probably switch this to a 0-based scheme to be consistent * with how we now name/number DBUF_CTL instances. 
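The following hunk adds with_intel_display_power_if_enabled(), a conditional variant of the existing with_intel_display_power() macro. Both are built on the same for-loop scoping idiom: the reference is acquired in the init clause, the body runs once while it is held, and the comma expression in the increment clause releases it and terminates the loop. A minimal stand-alone sketch of the idiom follows, using hypothetical resource_get()/resource_put() helpers and a plain int wakeref, not the real i915 API:

    #include <stdio.h>

    typedef int wakeref_t;	/* stand-in for intel_wakeref_t (assumption) */

    static wakeref_t resource_get(void)
    {
    	puts("acquire");
    	return 1;	/* 0 would mean "not available" */
    }

    static void resource_put(wakeref_t wf)
    {
    	(void)wf;
    	puts("release");
    }

    /*
     * Runs the body once while the reference is held; the comma expression
     * releases the reference and zeroes the loop variable, so the loop
     * terminates after a single pass. If resource_get() returns 0, the body
     * is skipped entirely -- the behaviour the _if_enabled variant wants.
     */
    #define with_resource(wf) \
    	for ((wf) = resource_get(); (wf); resource_put(wf), (wf) = 0)

    int main(void)
    {
    	wakeref_t wf;

    	with_resource(wf)
    		printf("holding reference %d\n", wf);

    	return 0;
    }

One caveat of the idiom, which applies to the kernel macros too: a break or return inside the body jumps out before the increment clause runs, skipping the release, so the body must not exit the loop directly.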
@@ -410,6 +320,10 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, for ((wf) = intel_display_power_get((i915), (domain)); (wf); \ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) +#define with_intel_display_power_if_enabled(i915, domain, wf) \ + for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \ + intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) + void chv_phy_powergate_lanes(struct intel_encoder *encoder, bool override, unsigned int mask); bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.c b/drivers/gpu/drm/i915/display/intel_display_trace.c new file mode 100644 index 000000000000..737979ada869 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_trace.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "intel_display_trace.h" +#endif diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h new file mode 100644 index 000000000000..4043e1276383 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -0,0 +1,587 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2021 Intel Corporation + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 + +#if !defined(__INTEL_DISPLAY_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ) +#define __INTEL_DISPLAY_TRACE_H__ + +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include "i915_drv.h" +#include "intel_crtc.h" +#include "intel_display_types.h" + +TRACE_EVENT(intel_pipe_enable, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + TP_fast_assign( + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); + } + __entry->pipe = crtc->pipe; + ), + + TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_disable, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + + TP_fast_assign( + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); + } + __entry->pipe = crtc->pipe; + ), + + TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_crc, + TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), + TP_ARGS(crtc, crcs), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + 
__field(u32, scanline) + __array(u32, crcs, 5) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], + __entry->crcs[3], __entry->crcs[4]) +); + +TRACE_EVENT(intel_cpu_fifo_underrun, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), + TP_ARGS(dev_priv, pipe), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + __entry->pipe = pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_pch_fifo_underrun, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), + TP_ARGS(dev_priv, pch_transcoder), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + enum pipe pipe = pch_transcoder; + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + __entry->pipe = pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pch transcoder %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_memory_cxsr, + TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), + TP_ARGS(dev_priv, old, new), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(bool, old) + __field(bool, new) + ), + + TP_fast_assign( + struct intel_crtc *crtc; + for_each_intel_crtc(&dev_priv->drm, crtc) { + __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); + __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); + } + __entry->old = old; + __entry->new = new; + ), + + TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + onoff(__entry->old), onoff(__entry->new), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(g4x_wm, + TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm), + TP_ARGS(crtc, wm), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u16, primary) + __field(u16, sprite) + __field(u16, cursor) + __field(u16, sr_plane) + __field(u16, sr_cursor) + __field(u16, sr_fbc) + __field(u16, hpll_plane) + __field(u16, hpll_cursor) + __field(u16, hpll_fbc) + __field(bool, cxsr) + __field(bool, hpll) + __field(bool, fbc) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; + __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; + __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; + __entry->sr_plane = wm->sr.plane; + __entry->sr_cursor = wm->sr.cursor; + __entry->sr_fbc = wm->sr.fbc; + __entry->hpll_plane = 
wm->hpll.plane; + __entry->hpll_cursor = wm->hpll.cursor; + __entry->hpll_fbc = wm->hpll.fbc; + __entry->cxsr = wm->cxsr; + __entry->hpll = wm->hpll_en; + __entry->fbc = wm->fbc_en; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->primary, __entry->sprite, __entry->cursor, + yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, + yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc, + yesno(__entry->fbc)) +); + +TRACE_EVENT(vlv_wm, + TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm), + TP_ARGS(crtc, wm), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, level) + __field(u32, cxsr) + __field(u32, primary) + __field(u32, sprite0) + __field(u32, sprite1) + __field(u32, cursor) + __field(u32, sr_plane) + __field(u32, sr_cursor) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->level = wm->level; + __entry->cxsr = wm->cxsr; + __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; + __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; + __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1]; + __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; + __entry->sr_plane = wm->sr.plane; + __entry->sr_cursor = wm->sr.cursor; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->level, __entry->cxsr, + __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, + __entry->sr_plane, __entry->sr_cursor) +); + +TRACE_EVENT(vlv_fifo_size, + TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size), + TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, sprite0_start) + __field(u32, sprite1_start) + __field(u32, fifo_size) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->sprite0_start = sprite0_start; + __entry->sprite1_start = sprite1_start; + __entry->fifo_size = fifo_size; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->sprite0_start, + __entry->sprite1_start, __entry->fifo_size) +); + +TRACE_EVENT(intel_plane_update_noarm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(int, src, 4) + __array(int, dst, 4) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline, + DRM_RECT_FP_ARG((const struct 
drm_rect *)__entry->src), + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) +); + +TRACE_EVENT(intel_plane_update_arm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(int, src, 4) + __array(int, dst, 4) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline, + DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) +); + +TRACE_EVENT(intel_plane_disable_arm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_activate, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_deactivate, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_nuke, + TP_PROTO(struct intel_plane *plane), + TP_ARGS(plane), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev), + plane->pipe); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_crtc_vblank_work_start, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + 
__entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_crtc_vblank_work_end, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_pipe_update_start, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + __entry->min = crtc->debug.min_vbl; + __entry->max = crtc->debug.max_vbl; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->min, __entry->max) +); + +TRACE_EVENT(intel_pipe_update_vblank_evaded, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __field(u32, min) + __field(u32, max) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = crtc->debug.start_vbl_count; + __entry->scanline = crtc->debug.scanline_start; + __entry->min = crtc->debug.min_vbl; + __entry->max = crtc->debug.max_vbl; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline, __entry->min, __entry->max) +); + +TRACE_EVENT(intel_pipe_update_end, + TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), + TP_ARGS(crtc, frame, scanline_end), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = frame; + __entry->scanline = scanline_end; + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_frontbuffer_invalidate, + TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), + TP_ARGS(frontbuffer_bits, origin), + + TP_STRUCT__entry( + __field(unsigned int, frontbuffer_bits) + __field(unsigned int, origin) + ), + + TP_fast_assign( + __entry->frontbuffer_bits = frontbuffer_bits; + __entry->origin = origin; + ), + + TP_printk("frontbuffer_bits=0x%08x, origin=%u", + __entry->frontbuffer_bits, __entry->origin) +); + +TRACE_EVENT(intel_frontbuffer_flush, + TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), + TP_ARGS(frontbuffer_bits, origin), + + TP_STRUCT__entry( + __field(unsigned int, frontbuffer_bits) + __field(unsigned int, origin) + ), + + TP_fast_assign( + __entry->frontbuffer_bits = frontbuffer_bits; + __entry->origin = origin; + ), + + TP_printk("frontbuffer_bits=0x%08x, origin=%u", + __entry->frontbuffer_bits, __entry->origin) +); + +#endif /* __INTEL_DISPLAY_TRACE_H__ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/display 
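Each TRACE_EVENT above has the same three-part anatomy: TP_STRUCT__entry declares the per-record layout, TP_fast_assign fills it at the emission point, and TP_printk formats it when the trace buffer is read. As an illustration only (the wrapper function is made up; the trace_*() helpers are the ones generated from the events above), a call site looks like this:

static void example_evade_vblank(struct intel_crtc *crtc)
{
	/* Records pipe, frame counter, scanline and the evasion window. */
	trace_intel_pipe_update_start(crtc);

	/* ... update the registers that must land inside the window ... */

	trace_intel_pipe_update_end(crtc, intel_crtc_get_vblank_counter(crtc),
				    intel_get_crtc_scanline(crtc));
}

Assuming the header keeps TRACE_SYSTEM set to i915, these events can then be enabled at runtime through tracefs under events/i915/.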
+#define TRACE_INCLUDE_FILE intel_display_trace +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 6beeeeba1bed..c9c6efadf8b4 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -28,6 +28,7 @@ #include <linux/async.h> #include <linux/i2c.h> +#include <linux/pm_qos.h> #include <linux/pwm.h> #include <linux/sched/clock.h> @@ -35,20 +36,30 @@ #include <drm/drm_crtc.h> #include <drm/drm_dp_dual_mode_helper.h> #include <drm/drm_dp_mst_helper.h> +#include <drm/drm_dsc.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> +#include <drm/drm_vblank_work.h> #include <drm/i915_mei_hdcp_interface.h> #include <media/cec-notifier.h> -#include "i915_drv.h" +#include "i915_vma.h" +#include "i915_vma_types.h" +#include "intel_bios.h" +#include "intel_display.h" +#include "intel_display_power.h" +#include "intel_dpll_mgr.h" +#include "intel_pm_types.h" struct drm_printer; struct __intel_global_objs_state; struct intel_ddi_buf_trans; +struct intel_fbc; +struct intel_connector; /* * Display related stuff @@ -103,8 +114,6 @@ struct intel_fb_view { * in the rotated and remapped GTT view all no-CCS formats (up to 2 * color planes) are supported. * - * TODO: add support for CCS formats in the remapped GTT view. - * * The view information shared by all FB color planes in the FB, * like dst x/y and src/dst width, is stored separately in * intel_plane_state. @@ -117,7 +126,8 @@ struct intel_fb_view { * bytes for 0/180 degree rotation * pixels for 90/270 degree rotation */ - unsigned int stride; + unsigned int mapping_stride; + unsigned int scanout_stride; } color_plane[4]; }; @@ -196,10 +206,6 @@ struct intel_encoder { void (*update_complete)(struct intel_atomic_state *, struct intel_encoder *, struct intel_crtc *); - void (*pre_disable)(struct intel_atomic_state *, - struct intel_encoder *, - const struct intel_crtc_state *, - const struct drm_connector_state *); void (*disable)(struct intel_atomic_state *, struct intel_encoder *, const struct intel_crtc_state *, @@ -271,6 +277,9 @@ struct intel_encoder { const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries); + void (*set_signal_levels)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); + enum hpd_pin hpd_pin; enum intel_display_power_domain power_domain; /* for communication with audio component; protected by av_mutex */ @@ -428,10 +437,6 @@ struct intel_hdcp_shim { int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port, bool *capable); - /* Detects whether a HDCP 1.4 sink connected in MST topology */ - int (*streams_type1_capable)(struct intel_connector *connector, - bool *capable); - /* Write HDCP2.2 messages */ int (*write_2_2_msg)(struct intel_digital_port *dig_port, void *buf, size_t size); @@ -629,6 +634,12 @@ struct intel_plane_state { struct intel_fb_view view; + /* Plane pxp decryption state */ + bool decrypt; + + /* Plane state to display black pixels when pxp is borked */ + bool force_black; + /* plane control register */ u32 ctl; @@ -684,6 +695,8 @@ struct intel_plane_state { /* Clear Color Value */ u64 ccval; + + const char *no_fbc_reason; }; struct intel_initial_plane_config { @@ -946,7 +959,6 @@ struct intel_crtc_state { * accordingly. 
*/ #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ -#define PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE (1<<1) /* bigjoiner slave, partial readout */ unsigned long quirks; unsigned fb_bits; /* framebuffers to flip */ @@ -1060,12 +1072,14 @@ struct intel_crtc_state { struct intel_link_m_n dp_m2_n2; bool has_drrs; + /* PSR is supported but might not be enabled due the lack of enabled planes */ bool has_psr; bool has_psr2; bool enable_psr2_sel_fetch; bool req_psr2_sdp_prior_scanline; u32 dc3co_exitline; u16 su_y_granularity; + struct drm_dp_vsc_sdp psr_vsc; /* * Frequence the dpll for the port should run at. Differs from the @@ -1113,8 +1127,6 @@ struct intel_crtc_state { bool crc_enabled; - bool enable_fbc; - bool double_wide; int pbn; @@ -1236,6 +1248,9 @@ struct intel_crtc_state { u8 link_count; u8 pixel_overlap; } splitter; + + /* for loading single buffered registers during vblank */ + struct drm_vblank_work vblank_work; }; enum intel_pipe_crc_source { @@ -1320,6 +1335,9 @@ struct intel_crtc { /* scalers available on this crtc */ int num_scalers; + /* for loading single buffered registers during vblank */ + struct pm_qos_request vblank_pm_qos; + #ifdef CONFIG_DEBUG_FS struct intel_pipe_crc pipe_crc; #endif @@ -1330,8 +1348,6 @@ struct intel_plane { enum i9xx_plane_id i9xx_plane; enum plane_id id; enum pipe pipe; - bool has_fbc; - bool has_ccs; bool need_async_flip_disable_wa; u32 frontbuffer_bit; @@ -1339,6 +1355,8 @@ struct intel_plane { u32 base, cntl, size; } cursor; + struct intel_fbc *fbc; + /* * NOTE: Do not place new plane state fields here (e.g., when adding * new plane properties). New runtime state should now be placed in @@ -1357,11 +1375,17 @@ struct intel_plane { unsigned int (*max_stride)(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation); - void (*update_plane)(struct intel_plane *plane, + /* Write all non-self arming plane registers */ + void (*update_noarm)(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); - void (*disable_plane)(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); + /* Write all self-arming plane registers */ + void (*update_arm)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + /* Disable the plane, must arm */ + void (*disable_arm)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); @@ -1529,7 +1553,6 @@ struct intel_psr { u32 dc3co_exitline; u32 dc3co_exit_delay; struct delayed_work dc3co_work; - struct drm_dp_vsc_sdp vsc; }; struct intel_dp { @@ -1559,6 +1582,8 @@ struct intel_dp { int num_sink_rates; int sink_rates[DP_MAX_SUPPORTED_RATES]; bool use_rate_select; + /* Max sink lane count as reported by DP_MAX_LANE_COUNT */ + int max_sink_lane_count; /* intersection of source and sink rates */ int num_common_rates; int common_rates[DP_MAX_SUPPORTED_RATES]; @@ -1576,7 +1601,6 @@ struct intel_dp { struct intel_pps pps; - bool can_mst; /* this port supports mst */ bool is_mst; int active_mst_links; @@ -1606,8 +1630,6 @@ struct intel_dp { u8 dp_train_pat); void (*set_idle_link_train)(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); - void (*set_signal_levels)(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); u8 
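The update_plane/disable_plane rework above splits plane programming into registers that latch immediately and registers that only take effect once a self-arming write happens. A sketch of the calling pattern the field comments imply (hypothetical wrapper name; treating the noarm hook as optional is an assumption for hardware without such a split):

static void example_commit_plane(struct intel_plane *plane,
				 const struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state)
{
	/* Stage the non-self-arming registers outside the critical section. */
	if (plane->update_noarm)
		plane->update_noarm(plane, crtc_state, plane_state);

	/* Arm the update; the hardware latches it at the next start of vblank. */
	plane->update_arm(plane, crtc_state, plane_state);
}

Doing the bulk of the writes in the noarm phase shrinks the vblank-evasion critical section that the arming writes must fit into.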
(*preemph_max)(struct intel_dp *intel_dp); u8 (*voltage_max)(struct intel_dp *intel_dp, @@ -1639,6 +1661,9 @@ struct intel_dp { struct intel_dp_pcon_frl frl; struct intel_psr psr; + + /* When we last wrote the OUI for eDP */ + unsigned long last_oui_write; }; enum lspcon_vendor { @@ -1667,8 +1692,11 @@ struct intel_digital_port { enum intel_display_power_domain ddi_io_power_domain; intel_wakeref_t ddi_io_wakeref; intel_wakeref_t aux_wakeref; + struct mutex tc_lock; /* protects the TypeC port mode */ intel_wakeref_t tc_lock_wakeref; + enum intel_display_power_domain tc_lock_power_domain; + struct delayed_work tc_disconnect_phy_work; int tc_link_refcount; bool tc_legacy_port:1; char tc_port_name[8]; @@ -1684,6 +1712,8 @@ struct intel_digital_port { bool hdcp_auth_status; /* HDCP port data need to pass to security f/w */ struct hdcp_port_data hdcp_port_data; + /* Whether the MST topology supports HDCP Type 1 Content */ + bool hdcp_mst_type1_capable; void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -1751,35 +1781,6 @@ vlv_pipe_to_channel(enum pipe pipe) } } -static inline bool intel_pipe_valid(struct drm_i915_private *i915, enum pipe pipe) -{ - return (pipe >= 0 && - pipe < ARRAY_SIZE(i915->pipe_to_crtc_mapping) && - INTEL_INFO(i915)->pipe_mask & BIT(pipe) && - i915->pipe_to_crtc_mapping[pipe]); -} - -static inline struct intel_crtc * -intel_get_first_crtc(struct drm_i915_private *dev_priv) -{ - return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0)); -} - -static inline struct intel_crtc * -intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - /* pipe_to_crtc_mapping may have hole on any of 3 display pipe system */ - drm_WARN_ON(&dev_priv->drm, - !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe))); - return dev_priv->pipe_to_crtc_mapping[pipe]; -} - -static inline struct intel_crtc * -intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane) -{ - return dev_priv->plane_to_crtc_mapping[plane]; -} - struct intel_load_detect_pipe { struct drm_atomic_state *restore_state; }; @@ -1889,11 +1890,7 @@ dp_to_lspcon(struct intel_dp *intel_dp) return &dp_to_dig_port(intel_dp)->lspcon; } -static inline struct drm_i915_private * -dp_to_i915(struct intel_dp *intel_dp) -{ - return to_i915(dp_to_dig_port(intel_dp)->base.base.dev); -} +#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev) #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ (intel_dp)->psr.source_support) @@ -1997,33 +1994,6 @@ intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state) return drm_atomic_crtc_needs_modeset(&crtc_state->uapi); } -static inline void -intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - - drm_crtc_wait_one_vblank(&crtc->base); -} - -static inline void -intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) -{ - const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - - if (crtc->active) - intel_wait_for_vblank(dev_priv, pipe); -} - -static inline bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) -{ - return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR; -} - -static inline bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) -{ - return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); -} - static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state 
*plane_state) { return i915_ggtt_offset(plane_state->ggtt_vma); @@ -2035,42 +2005,4 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; } -static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) -{ - if (dev_priv->params.panel_use_ssc >= 0) - return dev_priv->params.panel_use_ssc != 0; - return dev_priv->vbt.lvds_use_ssc - && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); -} - -static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll) -{ - return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; -} - -static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *pipe_config) -{ - if (HAS_DDI(dev_priv)) - return pipe_config->port_clock; /* SPLL */ - else - return dev_priv->fdi_pll_freq; -} - -static inline bool is_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; -} - -static inline bool is_gen12_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; -} - #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 3c3c6cb5c0df..a69b28d65a9b 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -45,8 +45,10 @@ #define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE -#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 10) -#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 10) +#define GEN13_DMC_MAX_FW_SIZE 0x20000 + +#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 14) +#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 14) MODULE_FIRMWARE(ADLP_DMC_PATH); #define ADLS_DMC_PATH DMC_PATH(adls, 2, 01) @@ -255,20 +257,10 @@ intel_get_stepping_info(struct drm_i915_private *i915, static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) { - u32 val, mask; - - mask = DC_STATE_DEBUG_MASK_MEMORY_UP; - - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - mask |= DC_STATE_DEBUG_MASK_CORES; - /* The below bit doesn't need to be cleared ever afterwards */ - val = intel_de_read(dev_priv, DC_STATE_DEBUG); - if ((val & mask) != mask) { - val |= mask; - intel_de_write(dev_priv, DC_STATE_DEBUG, val); - intel_de_posting_read(dev_priv, DC_STATE_DEBUG); - } + intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0, + DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); + intel_de_posting_read(dev_priv, DC_STATE_DEBUG); } /** @@ -606,7 +598,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, continue; offset = readcount + dmc->dmc_info[id].dmc_offset * 4; - if (fw->size - offset < 0) { + if (offset > fw->size) { drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); continue; } @@ -692,7 +684,7 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) if (IS_ALDERLAKE_P(dev_priv)) { dmc->fw_path = ADLP_DMC_PATH; dmc->required_version = ADLP_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = GEN13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_S(dev_priv)) { dmc->fw_path = ADLS_DMC_PATH; dmc->required_version = ADLS_DMC_VERSION_REQUIRED; @@ -805,11 +797,14 @@ void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv) */ void intel_dmc_ucode_fini(struct 
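The parse_dmc_fw() change above ("fw->size - offset < 0" becoming "offset > fw->size") fixes a classic unsigned-arithmetic bug: fw->size is unsigned, so the subtraction wraps around instead of going negative and the old test could never fire. A standalone demonstration:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t size = 100, offset = 200;

	/* Wraps to a huge positive value; "(size - offset < 0)" is dead code. */
	printf("size - offset = %zu\n", size - offset);

	/* Comparing instead of subtracting avoids the wraparound entirely. */
	printf("out of bounds: %s\n", offset > size ? "yes" : "no");
	return 0;
}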
drm_i915_private *dev_priv) { + int id; + if (!HAS_DMC(dev_priv)) return; intel_dmc_ucode_suspend(dev_priv); drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref); - kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload); + for (id = 0; id < DMC_FW_MAX; id++) + kfree(dev_priv->dmc.dmc_info[id].payload); } diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index c3c00ff03869..b20f3441ca60 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -20,6 +20,8 @@ enum { DMC_FW_MAIN = 0, DMC_FW_PIPEA, DMC_FW_PIPEB, + DMC_FW_PIPEC, + DMC_FW_PIPED, DMC_FW_MAX }; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 04175f359fd6..b5e2508db1cf 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -29,6 +29,7 @@ #include <linux/i2c.h> #include <linux/notifier.h> #include <linux/slab.h> +#include <linux/timekeeping.h> #include <linux/types.h> #include <asm/byteorder.h> @@ -44,7 +45,9 @@ #include "i915_drv.h" #include "intel_atomic.h" #include "intel_audio.h" +#include "intel_backlight.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" @@ -55,6 +58,7 @@ #include "intel_dp_mst.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" +#include "intel_drrs.h" #include "intel_fifo_underrun.h" #include "intel_hdcp.h" #include "intel_hdmi.h" @@ -64,7 +68,6 @@ #include "intel_panel.h" #include "intel_pps.h" #include "intel_psr.h" -#include "intel_sideband.h" #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vrr.h" @@ -100,6 +103,8 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4}; * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. + * + * This function is not safe to use prior to encoder type being set. */ bool intel_dp_is_edp(struct intel_dp *intel_dp) { @@ -111,8 +116,20 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp) static void intel_dp_unset_edid(struct intel_dp *intel_dp); static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc); +/* Is link rate UHBR and thus 128b/132b? */ +bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->port_clock >= 1000000; +} + +static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) +{ + intel_dp->sink_rates[0] = 162000; + intel_dp->num_sink_rates = 1; +} + /* update sink rates from dpcd */ -static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) +static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) { static const int dp_rates[] = { 162000, 270000, 540000, 810000 @@ -130,6 +147,9 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) return; } + /* + * Sink rates for 8b/10b. + */ max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); if (max_lttpr_rate) @@ -141,9 +161,92 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) intel_dp->sink_rates[i] = dp_rates[i]; } + /* + * Sink rates for 128b/132b. If set, sink should support all 8b/10b + * rates and 10 Gbps. 
+ */ + if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) { + u8 uhbr_rates = 0; + + BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3); + + drm_dp_dpcd_readb(&intel_dp->aux, + DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates); + + if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) { + /* We have a repeater */ + if (intel_dp->lttpr_common_caps[0] >= 0x20 && + intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] & + DP_PHY_REPEATER_128B132B_SUPPORTED) { + /* Repeater supports 128b/132b, valid UHBR rates */ + uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + } else { + /* Does not support 128b/132b */ + uhbr_rates = 0; + } + } + + if (uhbr_rates & DP_UHBR10) + intel_dp->sink_rates[i++] = 1000000; + if (uhbr_rates & DP_UHBR13_5) + intel_dp->sink_rates[i++] = 1350000; + if (uhbr_rates & DP_UHBR20) + intel_dp->sink_rates[i++] = 2000000; + } + intel_dp->num_sink_rates = i; } +static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + + intel_dp_set_dpcd_sink_rates(intel_dp); + + if (intel_dp->num_sink_rates) + return; + + drm_err(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + + intel_dp_set_default_sink_rates(intel_dp); +} + +static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp) +{ + intel_dp->max_sink_lane_count = 1; +} + +static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + + intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + + switch (intel_dp->max_sink_lane_count) { + case 1: + case 2: + case 4: + return; + } + + drm_err(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + intel_dp->max_sink_lane_count); + + intel_dp_set_default_max_sink_lane_count(intel_dp); +} + /* Get length of rates array potentially limited by max_rate. 
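The 128b/132b branch above converts the DP_128B132B_SUPPORTED_LINK_RATES bitmask into explicit sink rates, first masking it against the repeater's capabilities when an LTTPR is present. A condensed, standalone sketch of just the mask decoding, using the driver's units of 10 kbps (helper name invented):

static int example_decode_uhbr_rates(u8 uhbr_rates, int *rates)
{
	int n = 0;

	if (uhbr_rates & DP_UHBR10)
		rates[n++] = 1000000;	/* 10 Gbps */
	if (uhbr_rates & DP_UHBR13_5)
		rates[n++] = 1350000;	/* 13.5 Gbps */
	if (uhbr_rates & DP_UHBR20)
		rates[n++] = 2000000;	/* 20 Gbps */

	return n;	/* number of UHBR entries appended */
}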
*/ static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) { @@ -166,10 +269,19 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, intel_dp->num_common_rates, max_rate); } +static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) +{ + if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, + index < 0 || index >= intel_dp->num_common_rates)) + return 162000; + + return intel_dp->common_rates[index]; +} + /* Theoretical max between source and sink */ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) { - return intel_dp->common_rates[intel_dp->num_common_rates - 1]; + return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); } /* Theoretical max between source and sink */ @@ -177,7 +289,7 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); int source_max = dig_port->max_lanes; - int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + int sink_max = intel_dp->max_sink_lane_count; int fia_max = intel_tc_port_fia_max_lane_count(dig_port); int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); @@ -189,9 +301,21 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) int intel_dp_max_lane_count(struct intel_dp *intel_dp) { - return intel_dp->max_link_lane_count; + switch (intel_dp->max_link_lane_count) { + case 1: + case 2: + case 4: + return intel_dp->max_link_lane_count; + default: + MISSING_CASE(intel_dp->max_link_lane_count); + return 1; + } } +/* + * The required data bandwidth for a mode with given pixel clock and bpp. This + * is the required net bandwidth independent of the data bandwidth efficiency. + */ int intel_dp_link_required(int pixel_clock, int bpp) { @@ -199,16 +323,52 @@ intel_dp_link_required(int pixel_clock, int bpp) return DIV_ROUND_UP(pixel_clock * bpp, 8); } +/* + * Given a link rate and lanes, get the data bandwidth. + * + * Data bandwidth is the actual payload rate, which depends on the data + * bandwidth efficiency and the link rate. + * + * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency + * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) = + * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by + * coincidence, the port clock in kHz matches the data bandwidth in kBps, and + * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no + * longer holds for data bandwidth as soon as FEC or MST is taken into account!) + * + * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For + * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875 + * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000 + * does not match the symbol clock, the port clock (not even if you think in + * terms of a byte clock), nor the data bandwidth. It only matches the link bit + * rate in units of 10000 bps. + */ int -intel_dp_max_data_rate(int max_link_clock, int max_lanes) +intel_dp_max_data_rate(int max_link_rate, int max_lanes) { - /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the - * link rate that is generally expressed in Gbps. Since, 8 bits of data - * is transmitted every LS_Clk per lane, there is no need to account for - * the channel encoding that is done in the PHY layer here. + if (max_link_rate >= 1000000) { + /* + * UHBR rates always use 128b/132b channel encoding, and have + * 96.71% data bandwidth efficiency. 
Consider max_link_rate the + * link bit rate in units of 10000 bps. + */ + int max_link_rate_kbps = max_link_rate * 10; + + max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000); + max_link_rate = max_link_rate_kbps / 8; + } + + /* + * Lower than UHBR rates always use 8b/10b channel encoding, and have + * 80% data bandwidth efficiency for SST non-FEC. However, this turns + * out to be a nop by coincidence, and can be skipped: + * + * int max_link_rate_kbps = max_link_rate * 10; + * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10); + * max_link_rate = max_link_rate_kbps / 8; */ - return max_link_clock * max_lanes; + return max_link_rate * max_lanes; } bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) @@ -222,6 +382,20 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) encoder->port != PORT_A); } +static int dg2_max_source_rate(struct intel_dp *intel_dp) +{ + return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; +} + +static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) +{ + u32 voltage; + + voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; + + return voltage == VOLTAGE_INFO_0_85V; +} + static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -229,7 +403,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); if (intel_phy_is_combo(dev_priv, phy) && - !intel_dp_is_edp(intel_dp)) + (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) return 540000; return 810000; @@ -237,7 +411,23 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) static int ehl_max_source_rate(struct intel_dp *intel_dp) { - if (intel_dp_is_edp(intel_dp)) + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); + + if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) + return 540000; + + return 810000; +} + +static int dg1_max_source_rate(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) return 540000; return 810000; @@ -248,7 +438,8 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) { /* The values must be in increasing order */ static const int icl_rates[] = { - 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000 + 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000, + 1000000, 1350000, }; static const int bxt_rates[] = { 162000, 216000, 243000, 270000, 324000, 432000, 540000 @@ -275,7 +466,12 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) if (DISPLAY_VER(dev_priv) >= 11) { source_rates = icl_rates; size = ARRAY_SIZE(icl_rates); - if (IS_JSL_EHL(dev_priv)) + if (IS_DG2(dev_priv)) + max_rate = dg2_max_source_rate(intel_dp); + else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || + IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) + max_rate = dg1_max_source_rate(intel_dp); + else if (IS_JSL_EHL(dev_priv)) max_rate = ehl_max_source_rate(intel_dp); else max_rate = icl_max_source_rate(intel_dp); @@ -425,13 +621,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, if (index > 0) { if 
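To make the efficiency comment above concrete, here is the UHBR branch of intel_dp_max_data_rate() reduced to a standalone calculation. The 96.71% factor is applied as *9671/10000 as in the driver; plain division stands in for DIV_ROUND_CLOSEST_ULL because this example divides evenly:

#include <stdio.h>

static int example_uhbr_data_rate(int max_link_rate, int max_lanes)
{
	long long kbps = (long long)max_link_rate * 10;	/* 10 kbps units -> kbps */
	long long kBps = kbps * 9671 / 10000 / 8;	/* efficiency, then bits -> bytes */

	return (int)(kBps * max_lanes);
}

int main(void)
{
	/* DP_UHBR10 x 4 lanes: 1208875 kBps per lane, 4835500 kBps total. */
	printf("%d\n", example_uhbr_data_rate(1000000, 4));
	return 0;
}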
(intel_dp_is_edp(intel_dp) && !intel_dp_can_link_train_fallback_for_edp(intel_dp, - intel_dp->common_rates[index - 1], + intel_dp_common_rate(intel_dp, index - 1), lane_count)) { drm_dbg_kms(&i915->drm, "Retrying Link training for eDP with same parameters\n"); return 0; } - intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; + intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1); intel_dp->max_link_lane_count = lane_count; } else if (lane_count > 1) { if (intel_dp_is_edp(intel_dp) && @@ -461,7 +657,9 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock) static int small_joiner_ram_size_bits(struct drm_i915_private *i915) { - if (DISPLAY_VER(i915) >= 11) + if (DISPLAY_VER(i915) >= 13) + return 17280 * 8; + else if (DISPLAY_VER(i915) >= 11) return 7680 * 8; else return 6144 * 8; @@ -703,6 +901,17 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, return MODE_OK; } +static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp, + int hdisplay, int clock) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + if (!intel_dp_can_bigjoiner(intel_dp)) + return false; + + return clock > i915->max_dotclk_freq || hdisplay > 5120; +} + static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) @@ -726,11 +935,9 @@ intel_dp_mode_valid(struct drm_connector *connector, return MODE_H_ILLEGAL; if (intel_dp_is_edp(intel_dp) && fixed_mode) { - if (mode->hdisplay != fixed_mode->hdisplay) - return MODE_PANEL; - - if (mode->vdisplay != fixed_mode->vdisplay) - return MODE_PANEL; + status = intel_panel_mode_valid(intel_connector, mode); + if (status != MODE_OK) + return status; target_clock = fixed_mode->clock; } @@ -738,8 +945,7 @@ intel_dp_mode_valid(struct drm_connector *connector, if (mode->clock < 10000) return MODE_CLOCK_LOW; - if ((target_clock > max_dotclk || mode->hdisplay > 5120) && - intel_dp_can_bigjoiner(intel_dp)) { + if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) { bigjoiner = true; max_dotclk *= 2; } @@ -811,18 +1017,14 @@ intel_dp_mode_valid(struct drm_connector *connector, return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); } -bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) +bool intel_dp_source_supports_tps3(struct drm_i915_private *i915) { - int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; - - return max_rate >= 540000; + return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915); } -bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp) +bool intel_dp_source_supports_tps4(struct drm_i915_private *i915) { - int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; - - return max_rate >= 810000; + return DISPLAY_VER(i915) >= 10; } static void snprintf_int_array(char *str, size_t len, @@ -865,14 +1067,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) int intel_dp_max_link_rate(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); int len; len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); - if (drm_WARN_ON(&i915->drm, len <= 0)) - return 162000; - return intel_dp->common_rates[len - 1]; + return intel_dp_common_rate(intel_dp, len - 1); } int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) @@ -1044,7 +1243,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, intel_dp->num_common_rates, intel_dp->compliance.test_link_rate); if (index >= 0) - limits->min_clock = limits->max_clock = index; + limits->min_rate 
= limits->max_rate = + intel_dp->compliance.test_link_rate; limits->min_lane_count = limits->max_lane_count = intel_dp->compliance.test_lane_count; } @@ -1058,8 +1258,8 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, const struct link_config_limits *limits) { struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - int bpp, clock, lane_count; - int mode_rate, link_clock, link_avail; + int bpp, i, lane_count; + int mode_rate, link_rate, link_avail; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); @@ -1067,18 +1267,22 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, output_bpp); - for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { + for (i = 0; i < intel_dp->num_common_rates; i++) { + link_rate = intel_dp_common_rate(intel_dp, i); + if (link_rate < limits->min_rate || + link_rate > limits->max_rate) + continue; + for (lane_count = limits->min_lane_count; lane_count <= limits->max_lane_count; lane_count <<= 1) { - link_clock = intel_dp->common_rates[clock]; - link_avail = intel_dp_max_data_rate(link_clock, + link_avail = intel_dp_max_data_rate(link_rate, lane_count); if (mode_rate <= link_avail) { pipe_config->lane_count = lane_count; pipe_config->pipe_bpp = bpp; - pipe_config->port_clock = link_clock; + pipe_config->port_clock = link_rate; return 0; } @@ -1143,7 +1347,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, else vdsc_cfg->slice_height = 2; - ret = intel_dsc_compute_params(encoder, crtc_state); + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; @@ -1212,7 +1416,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, * with DSC enabled for the requested mode. */ pipe_config->pipe_bpp = pipe_bpp; - pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; + pipe_config->port_clock = limits->max_rate; pipe_config->lane_count = limits->max_lane_count; if (intel_dp_is_edp(intel_dp)) { @@ -1312,17 +1516,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; - int common_len; int ret; - common_len = intel_dp_common_len_rate_limit(intel_dp, - intel_dp->max_link_rate); - - /* No common link rates between source and sink */ - drm_WARN_ON(encoder->base.dev, common_len <= 0); - - limits.min_clock = 0; - limits.max_clock = common_len - 1; + limits.min_rate = intel_dp_common_rate(intel_dp, 0); + limits.max_rate = intel_dp_max_link_rate(intel_dp); limits.min_lane_count = 1; limits.max_lane_count = intel_dp_max_lane_count(intel_dp); @@ -1340,20 +1537,18 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, * values correspond to the native resolution of the panel. 
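intel_dp_compute_link_config_wide() above walks bpp from high to low, link rate from low to high, and lane count from low to high, taking the first combination whose payload bandwidth covers the mode. A standalone sketch of that search order with made-up numbers (for 8b/10b, data rate in kBps conveniently equals link rate times lanes, per the earlier comment):

#include <stdio.h>

int main(void)
{
	static const int rates[] = { 162000, 270000, 540000, 810000 };
	const int pixel_clock = 533250;	/* kHz, roughly a 4k60 mode */
	int bpp, i, lanes;

	for (bpp = 30; bpp >= 18; bpp -= 6) {
		int mode_rate = pixel_clock * bpp / 8;	/* required kBps */

		for (i = 0; i < 4; i++)
			for (lanes = 1; lanes <= 4; lanes <<= 1)
				if (mode_rate <= rates[i] * lanes) {
					printf("bpp=%d rate=%d lanes=%d\n",
					       bpp, rates[i], lanes);
					return 0;
				}
	}
	return 1;	/* no config fits */
}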
*/ limits.min_lane_count = limits.max_lane_count; - limits.min_clock = limits.max_clock; + limits.min_rate = limits.max_rate; } intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " "max rate %d max bpp %d pixel clock %iKHz\n", - limits.max_lane_count, - intel_dp->common_rates[limits.max_clock], + limits.max_lane_count, limits.max_rate, limits.max_bpp, adjusted_mode->crtc_clock); - if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq || - adjusted_mode->crtc_hdisplay > 5120) && - intel_dp_can_bigjoiner(intel_dp)) + if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay, + adjusted_mode->crtc_clock)) pipe_config->bigjoiner = true; /* @@ -1553,7 +1748,7 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, { vsc->sdp_type = DP_SDP_VSC; - if (intel_dp->psr.psr2_enabled) { + if (crtc_state->has_psr2) { if (intel_dp->psr.colorimetry_support && intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { /* [PSR2, +Colorimetry] */ @@ -1603,46 +1798,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); } -static void -intel_dp_drrs_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - int output_bpp, bool constant_n) -{ - struct intel_connector *intel_connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - int pixel_clock; - - if (pipe_config->vrr.enable) - return; - - /* - * DRRS and PSR can't be enable together, so giving preference to PSR - * as it allows more power-savings by complete shutting down display, - * so to guarantee this, intel_dp_drrs_compute_config() must be called - * after intel_psr_compute_config(). 
- */ - if (pipe_config->has_psr) - return; - - if (!intel_connector->panel.downclock_mode || - dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) - return; - - pipe_config->has_drrs = true; - - pixel_clock = intel_connector->panel.downclock_mode->clock; - if (pipe_config->splitter.enable) - pixel_clock /= pipe_config->splitter.link_count; - - intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, - pipe_config->port_clock, &pipe_config->dp_m2_n2, - constant_n, pipe_config->fec_enable); - - /* FIXME: abstract this better */ - if (pipe_config->splitter.enable) - pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; -} - int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -1665,7 +1820,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode); if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { - ret = intel_pch_panel_fitting(pipe_config, conn_state); + ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; } @@ -1678,13 +1833,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { - intel_fixed_panel_mode(intel_connector->panel.fixed_mode, - adjusted_mode); + ret = intel_panel_compute_config(intel_connector, adjusted_mode); + if (ret) + return ret; - if (HAS_GMCH(dev_priv)) - ret = intel_gmch_panel_fitting(pipe_config, conn_state); - else - ret = intel_pch_panel_fitting(pipe_config, conn_state); + ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; } @@ -1750,9 +1903,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, g4x_dp_set_clock(encoder, pipe_config); intel_vrr_compute_config(pipe_config, conn_state); - intel_psr_compute_config(intel_dp, pipe_config); - intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp, - constant_n); + intel_psr_compute_config(intel_dp, pipe_config, conn_state); + intel_drrs_compute_config(intel_dp, pipe_config, output_bpp, + constant_n); intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); @@ -1762,11 +1915,18 @@ intel_dp_compute_config(struct intel_encoder *encoder, void intel_dp_set_link_params(struct intel_dp *intel_dp, int link_rate, int lane_count) { + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp->link_trained = false; intel_dp->link_rate = link_rate; intel_dp->lane_count = lane_count; } +static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp) +{ + intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); + intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); +} + /* Enable backlight PWM and backlight PP control. 
*/ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -1779,7 +1939,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, drm_dbg_kms(&i915->drm, "\n"); - intel_panel_enable_backlight(crtc_state, conn_state); + intel_backlight_enable(crtc_state, conn_state); intel_pps_backlight_on(intel_dp); } @@ -1795,7 +1955,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) drm_dbg_kms(&i915->drm, "\n"); intel_pps_backlight_off(intel_dp); - intel_panel_disable_backlight(old_conn_state); + intel_backlight_disable(old_conn_state); } static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) @@ -1852,6 +2012,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) drm_err(&i915->drm, "Failed to write source OUI\n"); + + intel_dp->last_oui_write = jiffies; +} + +void intel_dp_wait_source_oui(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); + wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); } /* If the device supports it, try to set the power state appropriately */ @@ -1916,6 +2086,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder, { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + if (!crtc_state) + return; + /* * Don't clobber DPCD if it's been already read out during output * setup (eDP) or detect. @@ -1923,8 +2096,7 @@ void intel_dp_sync_state(struct intel_encoder *encoder, if (intel_dp->dpcd[DP_DPCD_REV] == 0) intel_dp_get_dpcd(intel_dp); - intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); - intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); + intel_dp_reset_max_link_params(intel_dp); } bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, @@ -2038,6 +2210,18 @@ static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) return max_frl_rate; } +static bool +intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, + u8 max_frl_bw_mask, u8 *frl_trained_mask) +{ + if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) && + drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && + *frl_trained_mask >= max_frl_bw_mask) + return true; + + return false; +} + static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) { #define TIMEOUT_FRL_READY_MS 500 @@ -2048,10 +2232,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) u8 max_frl_bw_mask = 0, frl_trained_mask; bool is_active; - ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); - if (ret < 0) - return ret; - max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); @@ -2063,6 +2243,12 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) if (max_frl_bw <= 0) return -EINVAL; + max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); + drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask); + + if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask)) + goto frl_trained; + ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); if (ret < 0) return ret; @@ -2072,7 +2258,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) if (!is_active) return -ETIMEDOUT; - max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, 
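intel_dp_wait_source_oui() above uses a record-then-wait-the-remainder pattern: the OUI write stamps jiffies, and a later caller sleeps only for whatever is left of the 30 ms settling window. A conceptual equivalent of the wait_remaining_ms_from_jiffies() helper it relies on, sketched with standard kernel timer primitives:

static void example_wait_remaining_ms(unsigned long timestamp_jiffies,
				      unsigned int to_wait_ms)
{
	unsigned long target = timestamp_jiffies + msecs_to_jiffies(to_wait_ms);

	/* Sleep only if the window has not already elapsed by itself. */
	if (time_before(jiffies, target))
		msleep(jiffies_to_msecs(target - jiffies));
}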
DP_PCON_ENABLE_SEQUENTIAL_LINK); if (ret < 0) @@ -2088,19 +2273,15 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) * Wait for FRL to be completed * Check if the HDMI Link is up and active. */ - wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS); + wait_for(is_active = + intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), + TIMEOUT_HDMI_LINK_ACTIVE_MS); if (!is_active) return -ETIMEDOUT; - /* Verify HDMI Link configuration shows FRL Mode */ - if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != - DP_PCON_HDMI_MODE_FRL) { - drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); - return -EINVAL; - } - drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); - +frl_trained: + drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); intel_dp->frl.is_trained = true; drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); @@ -2118,6 +2299,28 @@ static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) return false; } +static +int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) +{ + int ret; + u8 buf = 0; + + /* Set PCON source control mode */ + buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; + + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); + if (ret < 0) + return ret; + + /* Set HDMI LINK ENABLE */ + buf |= DP_PCON_ENABLE_HDMI_LINK; + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); + if (ret < 0) + return ret; + + return 0; +} + void intel_dp_check_frl_training(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -2136,7 +2339,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp) int ret, mode; drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); - ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); + ret = intel_dp_pcon_set_tmds_mode(intel_dp); mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) @@ -2389,6 +2592,8 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector, static void intel_edp_mso_init(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_connector *connector = intel_dp->attached_connector; + struct drm_display_info *info = &connector->base.display_info; u8 mso; if (intel_dp->edp_dpcd[0] < DP_EDP_14) @@ -2407,8 +2612,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp) } if (mso) { - drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n", - mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso); + drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n", + mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso, + info->mso_pixel_overlap); if (!HAS_MSO(i915)) { drm_err(&i915->drm, "No source MSO support, disabling\n"); mso = 0; @@ -2416,7 +2622,7 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp) } intel_dp->mso_link_count = mso; - intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */ + intel_dp->mso_pixel_overlap = mso ? 
info->mso_pixel_overlap : 0; } static bool @@ -2445,17 +2651,23 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) */ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == - sizeof(intel_dp->edp_dpcd)) + sizeof(intel_dp->edp_dpcd)) { drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", (int)sizeof(intel_dp->edp_dpcd), intel_dp->edp_dpcd); + intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; + } + /* * This has to be called after intel_dp->edp_dpcd is filled, PSR checks * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] */ intel_psr_init_dpcd(intel_dp); + /* Clear the default sink rates */ + intel_dp->num_sink_rates = 0; + /* Read the eDP 1.4+ supported link rates. */ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; @@ -2489,8 +2701,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) intel_dp->use_rate_select = true; else intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); + intel_dp_reset_max_link_params(intel_dp); /* Read the eDP DSC DPCD registers */ if (DISPLAY_VER(dev_priv) >= 10) @@ -2502,8 +2716,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) */ intel_edp_init_source_oui(intel_dp, true); - intel_edp_mso_init(intel_dp); - return true; } @@ -2535,6 +2747,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) drm_dp_is_branch(intel_dp->dpcd)); intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); } @@ -2571,7 +2784,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) struct drm_i915_private *i915 = dp_to_i915(intel_dp); return i915->params.enable_dp_mst && - intel_dp->can_mst && + intel_dp_mst_source_support(intel_dp) && drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); } @@ -2586,10 +2799,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", encoder->base.base.id, encoder->base.name, - yesno(intel_dp->can_mst), yesno(sink_can_mst), + yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst), yesno(i915->params.enable_dp_mst)); - if (!intel_dp->can_mst) + if (!intel_dp_mst_source_support(intel_dp)) return; intel_dp->is_mst = sink_can_mst && @@ -2806,7 +3019,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, - struct drm_dp_vsc_sdp *vsc) + const struct drm_dp_vsc_sdp *vsc) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -3693,7 +3906,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, to_intel_crtc_state(crtc->base.state); /* Keep underrun reporting disabled until things are stable */ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); if (crtc_state->has_pch_encoder) @@ -4234,12 +4447,7 @@ intel_dp_detect(struct drm_connector *connector, * supports link training fallback params. 
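The eDP 1.4+ path above reads DP_SUPPORTED_LINK_RATES as a table of little-endian 16-bit entries in units of 200 kHz, terminated by a zero entry. A standalone decoding sketch (function name invented):

static int example_parse_edp_rates(const __le16 *table, int max_entries,
				   int *rates_khz)
{
	int i, n = 0;

	for (i = 0; i < max_entries; i++) {
		int val = le16_to_cpu(table[i]);

		if (val == 0)	/* a zero entry terminates the table */
			break;
		rates_khz[n++] = val * 200;	/* e.g. 2700 -> 540000 kHz (HBR2) */
	}

	return n;
}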
*/ if (intel_dp->reset_link_params || intel_dp->is_mst) { - /* Initial max link lane count */ - intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); - - /* Initial max link rate */ - intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); - + intel_dp_reset_max_link_params(intel_dp); intel_dp->reset_link_params = false; } @@ -4585,6 +4793,17 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn, return intel_modeset_synced_crtcs(state, conn); } +static void intel_dp_oob_hotplug_event(struct drm_connector *connector) +{ + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); + struct drm_i915_private *i915 = to_i915(connector->dev); + + spin_lock_irq(&i915->irq_lock); + i915->hotplug.event_bits |= BIT(encoder->hpd_pin); + spin_unlock_irq(&i915->irq_lock); + queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0); +} + static const struct drm_connector_funcs intel_dp_connector_funcs = { .force = intel_dp_force, .fill_modes = drm_helper_probe_single_connector_modes, @@ -4595,6 +4814,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, + .oob_hotplug_event = intel_dp_oob_hotplug_event, }; static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { @@ -4710,432 +4930,6 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect drm_connector_attach_vrr_capable_property(connector); } -/** - * intel_dp_set_drrs_state - program registers for RR switch to take effect - * @dev_priv: i915 device - * @crtc_state: a pointer to the active intel_crtc_state - * @refresh_rate: RR to be programmed - * - * This function gets called when refresh rate (RR) has to be changed from - * one frequency to another. Switches can be between high and low RR - * supported by the panel or to any other RR based on media playback (in - * this case, RR value needs to be passed from user space). - * - * The caller of this function needs to take a lock on dev_priv->drrs. - */ -static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *crtc_state, - int refresh_rate) -{ - struct intel_dp *intel_dp = dev_priv->drrs.dp; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum drrs_refresh_rate_type index = DRRS_HIGH_RR; - - if (refresh_rate <= 0) { - drm_dbg_kms(&dev_priv->drm, - "Refresh rate should be positive non-zero.\n"); - return; - } - - if (intel_dp == NULL) { - drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); - return; - } - - if (!crtc) { - drm_dbg_kms(&dev_priv->drm, - "DRRS: intel_crtc not initialized\n"); - return; - } - - if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { - drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); - return; - } - - if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) == - refresh_rate) - index = DRRS_LOW_RR; - - if (index == dev_priv->drrs.refresh_rate_type) { - drm_dbg_kms(&dev_priv->drm, - "DRRS requested for previously set RR...ignoring\n"); - return; - } - - if (!crtc_state->hw.active) { - drm_dbg_kms(&dev_priv->drm, - "eDP encoder disabled. 
CRTC not Active\n"); - return; - } - - if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { - switch (index) { - case DRRS_HIGH_RR: - intel_dp_set_m_n(crtc_state, M1_N1); - break; - case DRRS_LOW_RR: - intel_dp_set_m_n(crtc_state, M2_N2); - break; - case DRRS_MAX_RR: - default: - drm_err(&dev_priv->drm, - "Unsupported refreshrate type\n"); - } - } else if (DISPLAY_VER(dev_priv) > 6) { - i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); - u32 val; - - val = intel_de_read(dev_priv, reg); - if (index > DRRS_HIGH_RR) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val |= PIPECONF_EDP_RR_MODE_SWITCH; - } else { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val &= ~PIPECONF_EDP_RR_MODE_SWITCH; - } - intel_de_write(dev_priv, reg, val); - } - - dev_priv->drrs.refresh_rate_type = index; - - drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", - refresh_rate); -} - -static void -intel_edp_drrs_enable_locked(struct intel_dp *intel_dp) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - dev_priv->drrs.busy_frontbuffer_bits = 0; - dev_priv->drrs.dp = intel_dp; -} - -/** - * intel_edp_drrs_enable - init drrs struct if supported - * @intel_dp: DP struct - * @crtc_state: A pointer to the active crtc state. - * - * Initializes frontbuffer_bits and drrs.dp - */ -void intel_edp_drrs_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (!crtc_state->has_drrs) - return; - - drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); - - mutex_lock(&dev_priv->drrs.mutex); - - if (dev_priv->drrs.dp) { - drm_warn(&dev_priv->drm, "DRRS already enabled\n"); - goto unlock; - } - - intel_edp_drrs_enable_locked(intel_dp); - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -static void -intel_edp_drrs_disable_locked(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { - int refresh; - - refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode); - intel_dp_set_drrs_state(dev_priv, crtc_state, refresh); - } - - dev_priv->drrs.dp = NULL; -} - -/** - * intel_edp_drrs_disable - Disable DRRS - * @intel_dp: DP struct - * @old_crtc_state: Pointer to old crtc_state. - * - */ -void intel_edp_drrs_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (!old_crtc_state->has_drrs) - return; - - mutex_lock(&dev_priv->drrs.mutex); - if (!dev_priv->drrs.dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - intel_edp_drrs_disable_locked(intel_dp, old_crtc_state); - mutex_unlock(&dev_priv->drrs.mutex); - - cancel_delayed_work_sync(&dev_priv->drrs.work); -} - -/** - * intel_edp_drrs_update - Update DRRS state - * @intel_dp: Intel DP - * @crtc_state: new CRTC state - * - * This function will update DRRS states, disabling or enabling DRRS when - * executing fastsets. For full modeset, intel_edp_drrs_disable() and - * intel_edp_drrs_enable() should be called instead. 
- */ -void -intel_edp_drrs_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - - if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) - return; - - mutex_lock(&dev_priv->drrs.mutex); - - /* New state matches current one? */ - if (crtc_state->has_drrs == !!dev_priv->drrs.dp) - goto unlock; - - if (crtc_state->has_drrs) - intel_edp_drrs_enable_locked(intel_dp); - else - intel_edp_drrs_disable_locked(intel_dp, crtc_state); - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -static void intel_edp_drrs_downclock_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), drrs.work.work); - struct intel_dp *intel_dp; - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - - if (!intel_dp) - goto unlock; - - /* - * The delayed work can race with an invalidate hence we need to - * recheck. - */ - - if (dev_priv->drrs.busy_frontbuffer_bits) - goto unlock; - - if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) { - struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode)); - } - -unlock: - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * intel_edp_drrs_invalidate - Disable Idleness DRRS - * @dev_priv: i915 device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called everytime rendering on the given planes start. - * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR). - * - * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. - */ -void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits) -{ - struct intel_dp *intel_dp; - struct drm_crtc *crtc; - enum pipe pipe; - - if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) - return; - - cancel_delayed_work(&dev_priv->drrs.work); - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - if (!intel_dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); - dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; - - /* invalidate means busy screen hence upclock */ - if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * intel_edp_drrs_flush - Restart Idleness DRRS - * @dev_priv: i915 device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * This function gets called every time rendering on the given planes has - * completed or flip on a crtc is completed. So DRRS should be upclocked - * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, - * if no other planes are dirty. - * - * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. 
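/*
 * Sketch of how frontbuffer tracking drives the invalidate/flush pair being
 * moved here (the wrapper itself is hypothetical): render activity
 * invalidates, which upclocks to high RR; a completed flush or flip re-arms
 * the one second downclock timer once busy_frontbuffer_bits is empty again.
 */
static void example_frontbuffer_event(struct drm_i915_private *i915,
				      unsigned int frontbuffer_bits,
				      bool is_flush)
{
	if (is_flush)
		intel_edp_drrs_flush(i915, frontbuffer_bits);
	else
		intel_edp_drrs_invalidate(i915, frontbuffer_bits);
}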
- */ -void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits) -{ - struct intel_dp *intel_dp; - struct drm_crtc *crtc; - enum pipe pipe; - - if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) - return; - - cancel_delayed_work(&dev_priv->drrs.work); - - mutex_lock(&dev_priv->drrs.mutex); - - intel_dp = dev_priv->drrs.dp; - if (!intel_dp) { - mutex_unlock(&dev_priv->drrs.mutex); - return; - } - - crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); - dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; - - /* flush means busy screen hence upclock */ - if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) - intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, - drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode)); - - /* - * flush also means no more activity hence schedule downclock, if all - * other fbs are quiescent too - */ - if (!dev_priv->drrs.busy_frontbuffer_bits) - schedule_delayed_work(&dev_priv->drrs.work, - msecs_to_jiffies(1000)); - mutex_unlock(&dev_priv->drrs.mutex); -} - -/** - * DOC: Display Refresh Rate Switching (DRRS) - * - * Display Refresh Rate Switching (DRRS) is a power conservation feature - * which enables swtching between low and high refresh rates, - * dynamically, based on the usage scenario. This feature is applicable - * for internal panels. - * - * Indication that the panel supports DRRS is given by the panel EDID, which - * would list multiple refresh rates for one resolution. - * - * DRRS is of 2 types - static and seamless. - * Static DRRS involves changing refresh rate (RR) by doing a full modeset - * (may appear as a blink on screen) and is used in dock-undock scenario. - * Seamless DRRS involves changing RR without any visual effect to the user - * and can be used during normal system usage. This is done by programming - * certain registers. - * - * Support for static/seamless DRRS may be indicated in the VBT based on - * inputs from the panel spec. - * - * DRRS saves power by switching to low RR based on usage scenarios. - * - * The implementation is based on frontbuffer tracking implementation. When - * there is a disturbance on the screen triggered by user activity or a periodic - * system activity, DRRS is disabled (RR is changed to high RR). When there is - * no movement on screen, after a timeout of 1 second, a switch to low RR is - * made. - * - * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() - * and intel_edp_drrs_flush() are called. - * - * DRRS can be further extended to support other internal panels and also - * the scenario of video playback wherein RR is set based on the rate - * requested by userspace. - */ - -/** - * intel_dp_drrs_init - Init basic DRRS work and mutex. - * @connector: eDP connector - * @fixed_mode: preferred mode of panel - * - * This function is called only once at driver load to initialize basic - * DRRS stuff. - * - * Returns: - * Downclock mode if panel supports it, else return NULL. - * DRRS support is determined by the presence of downclock mode (apart - * from VBT setting). 
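/*
 * Illustrative check only (the real lookup in this function is
 * intel_panel_edid_downclock_mode()): a panel is a seamless-DRRS candidate
 * when its EDID carries a second mode with the same timings as the fixed
 * mode but a lower refresh rate.
 */
static bool example_is_downclock_mode(const struct drm_display_mode *fixed_mode,
				      const struct drm_display_mode *mode)
{
	return mode->hdisplay == fixed_mode->hdisplay &&
	       mode->vdisplay == fixed_mode->vdisplay &&
	       drm_mode_vrefresh(mode) < drm_mode_vrefresh(fixed_mode);
}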
- */ -static struct drm_display_mode * -intel_dp_drrs_init(struct intel_connector *connector, - struct drm_display_mode *fixed_mode) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct drm_display_mode *downclock_mode = NULL; - - INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); - mutex_init(&dev_priv->drrs.mutex); - - if (DISPLAY_VER(dev_priv) <= 6) { - drm_dbg_kms(&dev_priv->drm, - "DRRS supported for Gen7 and above\n"); - return NULL; - } - - if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { - drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); - return NULL; - } - - downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); - if (!downclock_mode) { - drm_dbg_kms(&dev_priv->drm, - "Downclock mode is not found. DRRS not supported\n"); - return NULL; - } - - dev_priv->drrs.type = dev_priv->vbt.drrs_type; - - dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; - drm_dbg_kms(&dev_priv->drm, - "seamless DRRS supported for eDP panel.\n"); - return downclock_mode; -} - static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct intel_connector *intel_connector) { @@ -5194,7 +4988,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, fixed_mode = intel_panel_edid_fixed_mode(intel_connector); if (fixed_mode) - downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); + downclock_mode = intel_drrs_init(intel_connector, fixed_mode); + + /* MSO requires information from the EDID */ + intel_edp_mso_init(intel_dp); /* multiply the mode clock and horizontal timings for MSO */ intel_edp_mso_mode_fixup(intel_connector, fixed_mode); @@ -5227,7 +5024,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) intel_connector->panel.backlight.power = intel_pps_backlight_power; - intel_panel_setup_backlight(connector, pipe); + intel_backlight_setup(intel_connector, pipe); if (fixed_mode) { drm_connector_set_panel_orientation_with_quirk(connector, @@ -5263,7 +5060,7 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) DRM_MODE_LINK_STATUS_BAD); mutex_unlock(&connector->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ - drm_kms_helper_hotplug_event(connector->dev); + drm_kms_helper_connector_hotplug_event(connector); } bool @@ -5289,8 +5086,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_encoder->base.name)) return false; - intel_dp_set_source_rates(intel_dp); - intel_dp->reset_link_params = true; intel_dp->pps.pps_pipe = INVALID_PIPE; intel_dp->pps.active_pipe = INVALID_PIPE; @@ -5306,28 +5101,26 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, */ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); type = DRM_MODE_CONNECTOR_eDP; + intel_encoder->type = INTEL_OUTPUT_EDP; + + /* eDP only on port B and/or C on vlv/chv */ + if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) && + port != PORT_B && port != PORT_C)) + return false; } else { type = DRM_MODE_CONNECTOR_DisplayPort; } + intel_dp_set_source_rates(intel_dp); + intel_dp_set_default_sink_rates(intel_dp); + intel_dp_set_default_max_sink_lane_count(intel_dp); + intel_dp_set_common_rates(intel_dp); + intel_dp_reset_max_link_params(intel_dp); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); - /* - * For eDP we always set the encoder type to 
INTEL_OUTPUT_EDP, but - * for DP the encoder type can be set by the caller to - * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. - */ - if (type == DRM_MODE_CONNECTOR_eDP) - intel_encoder->type = INTEL_OUTPUT_EDP; - - /* eDP only on port B and/or C on vlv/chv */ - if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv)) && - intel_dp_is_edp(intel_dp) && - port != PORT_B && port != PORT_C)) - return false; - drm_dbg_kms(&dev_priv->drm, "Adding %s connector on [ENCODER:%d:%s]\n", type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", @@ -5408,7 +5201,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) intel_dp = enc_to_intel_dp(encoder); - if (!intel_dp->can_mst) + if (!intel_dp_mst_source_support(intel_dp)) continue; if (intel_dp->is_mst) @@ -5432,7 +5225,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) intel_dp = enc_to_intel_dp(encoder); - if (!intel_dp->can_mst) + if (!intel_dp_mst_source_support(intel_dp)) continue; ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 680631b5b437..b64145a3869a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -26,7 +26,7 @@ struct intel_dp; struct intel_encoder; struct link_config_limits { - int min_clock, max_clock; + int min_rate, max_rate; int min_lane_count, max_lane_count; int min_bpp, max_bpp; }; @@ -58,6 +58,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); bool intel_dp_is_edp(struct intel_dp *intel_dp); +bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state); bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd); @@ -70,25 +71,14 @@ int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); -void intel_edp_drrs_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits); -void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, - unsigned int frontbuffer_bits); - void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); -bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); -bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); +bool intel_dp_source_supports_tps3(struct drm_i915_private *i915); +bool intel_dp_source_supports_tps4(struct drm_i915_private *i915); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); -int intel_dp_max_data_rate(int max_link_clock, int max_lanes); +int intel_dp_max_data_rate(int max_link_rate, int max_lanes); bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); @@ -98,7 +88,7 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, struct drm_dp_vsc_sdp *vsc); void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, const 
struct intel_crtc_state *crtc_state, - struct drm_dp_vsc_sdp *vsc); + const struct drm_dp_vsc_sdp *vsc); void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); @@ -129,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_dp_phy_test(struct intel_encoder *encoder); +void intel_dp_wait_source_oui(struct intel_dp *intel_dp); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index f483f479dd0b..5fbb767fcd63 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -150,9 +150,6 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, u32 unused) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *i915 = - to_i915(dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, dig_port->base.port); u32 ret; /* @@ -170,8 +167,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); - if (intel_phy_is_tc(i915, phy) && - dig_port->tc_mode == TC_PORT_TBT_ALT) + if (intel_tc_port_in_tbt_alt_mode(dig_port)) ret |= DP_AUX_CH_CTL_TBT_IO; return ret; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 6ac568617ef3..97cf3cac0105 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -34,9 +34,11 @@ * for some reason. */ +#include "i915_drv.h" +#include "intel_backlight.h" #include "intel_display_types.h" +#include "intel_dp.h" #include "intel_dp_aux_backlight.h" -#include "intel_panel.h" /* TODO: * Implement HDR, right now we just implement the bare minimum to bring us back into SDR mode so we @@ -106,6 +108,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) int ret; u8 tcon_cap[4]; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap)); if (ret != sizeof(tcon_cap)) return false; @@ -146,7 +150,7 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe if (!panel->backlight.edp.intel.sdr_uses_aux) { u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe); - return intel_panel_backlight_level_from_pwm(connector, pwm_level); + return intel_backlight_level_from_pwm(connector, pwm_level); } /* Assume 100% brightness if backlight controls aren't enabled yet */ @@ -187,9 +191,9 @@ intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 if (panel->backlight.edp.intel.sdr_uses_aux) { intel_dp_aux_hdr_set_aux_backlight(conn_state, level); } else { - const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level); + const u32 pwm_level = intel_backlight_level_to_pwm(connector, level); - intel_panel_set_pwm_level(conn_state, pwm_level); + intel_backlight_set_pwm_level(conn_state, pwm_level); } } @@ -204,6 +208,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, int ret; u8 old_ctrl, ctrl; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); @@ -215,7 +221,7 @@ 
intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE; intel_dp_aux_hdr_set_aux_backlight(conn_state, level); } else { - u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level); + u32 pwm_level = intel_backlight_level_to_pwm(connector, level); panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level); @@ -238,7 +244,7 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, return; /* Note we want the actual pwm_level to be 0, regardless of pwm_min */ - panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0)); + panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0)); } static int @@ -282,6 +288,12 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3 struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + if (!panel->backlight.edp.vesa.info.aux_set) { + const u32 pwm_level = intel_backlight_level_to_pwm(connector, level); + + intel_backlight_set_pwm_level(conn_state, pwm_level); + } + drm_edp_backlight_set_level(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } @@ -293,6 +305,18 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + if (!panel->backlight.edp.vesa.info.aux_enable) { + u32 pwm_level; + + if (!panel->backlight.edp.vesa.info.aux_set) + pwm_level = intel_backlight_level_to_pwm(connector, level); + else + pwm_level = intel_backlight_invert_pwm_level(connector, + panel->backlight.pwm_level_max); + + panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level); + } + drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level); } @@ -304,6 +328,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info); + + if (!panel->backlight.edp.vesa.info.aux_enable) + panel->backlight.pwm_funcs->disable(old_conn_state, + intel_backlight_invert_pwm_level(connector, 0)); } static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe) @@ -321,14 +349,36 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, if (ret < 0) return ret; - panel->backlight.max = panel->backlight.edp.vesa.info.max; - panel->backlight.min = 0; - if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { - panel->backlight.level = current_level; - panel->backlight.enabled = panel->backlight.level != 0; + if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) { + ret = panel->backlight.pwm_funcs->setup(connector, pipe); + if (ret < 0) { + drm_err(&i915->drm, + "Failed to setup PWM backlight controls for eDP backlight: %d\n", + ret); + return ret; + } + } + + if (panel->backlight.edp.vesa.info.aux_set) { + panel->backlight.max = panel->backlight.edp.vesa.info.max; + panel->backlight.min = 0; + if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { + panel->backlight.level = current_level; + panel->backlight.enabled = panel->backlight.level != 0; + } else { + panel->backlight.level = panel->backlight.max; + panel->backlight.enabled = false; + } } else { - panel->backlight.level = panel->backlight.max; - 
panel->backlight.enabled = false; + panel->backlight.max = panel->backlight.pwm_level_max; + panel->backlight.min = panel->backlight.pwm_level_min; + if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) { + panel->backlight.level = panel->backlight.pwm_funcs->get(connector, pipe); + panel->backlight.enabled = panel->backlight.pwm_enabled; + } else { + panel->backlight.level = panel->backlight.max; + panel->backlight.enabled = false; + } } return 0; @@ -340,12 +390,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); - /* TODO: We currently only support AUX only backlight configurations, not backlights which - * require a mix of PWM and AUX controls to work. In the mean time, these machines typically - * work just fine using normal PWM controls anyway. - */ - if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && - drm_edp_backlight_supported(intel_dp->edp_dpcd)) { + if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); return true; } @@ -417,11 +462,17 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) } /* - * A lot of eDP panels in the wild will report supporting both the - * Intel proprietary backlight control interface, and the VESA - * backlight control interface. Many of these panels are liars though, - * and will only work with the Intel interface. So, always probe for - * that first. + * Since Intel has their own backlight control interface, the majority of machines out there + * using DPCD backlight controls with Intel GPUs will be using this interface as opposed to + * the VESA interface. However, other GPUs (such as Nvidia's) will always use the VESA + * interface. This means that there's quite a number of panels out there that will advertise + * support for both interfaces, primarily systems with Intel/Nvidia hybrid GPU setups. + * + * There's a catch to this though: on many panels that advertise support for both + * interfaces, the VESA backlight interface will stop working once we've programmed the + * panel with Intel's OUI - which is also required for us to be able to detect Intel's + * backlight interface at all. This means that the only sensible way for us to detect both + * interfaces is to probe for Intel's first, and VESA's second. */ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) { drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n"); diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index d697d169e8c1..540a669e01dd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -446,8 +446,6 @@ static int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, void *buf, size_t size) { - struct intel_dp *dp = &dig_port->dp; - struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_write, len; @@ -463,8 +461,6 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, bytes_to_write = size - 1; byte++; - hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); - while (bytes_to_write) { len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? 
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; @@ -482,29 +478,11 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, return size; } -static int -get_rxinfo_hdcp_1_dev_downstream(struct intel_digital_port *dig_port, bool *hdcp_1_x) -{ - u8 rx_info[HDCP_2_2_RXINFO_LEN]; - int ret; - - ret = drm_dp_dpcd_read(&dig_port->dp.aux, - DP_HDCP_2_2_REG_RXINFO_OFFSET, - (void *)rx_info, HDCP_2_2_RXINFO_LEN); - - if (ret != HDCP_2_2_RXINFO_LEN) - return ret >= 0 ? -EIO : ret; - - *hdcp_1_x = HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) ? true : false; - return 0; -} - static -ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) +ssize_t get_receiver_id_list_rx_info(struct intel_digital_port *dig_port, u32 *dev_cnt, u8 *byte) { - u8 rx_info[HDCP_2_2_RXINFO_LEN]; - u32 dev_cnt; ssize_t ret; + u8 *rx_info = byte; ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_HDCP_2_2_REG_RXINFO_OFFSET, @@ -512,15 +490,11 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) if (ret != HDCP_2_2_RXINFO_LEN) return ret >= 0 ? -EIO : ret; - dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | + *dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | HDCP_2_2_DEV_COUNT_LO(rx_info[1])); - if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) - dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; - - ret = sizeof(struct hdcp2_rep_send_receiverid_list) - - HDCP_2_2_RECEIVER_IDS_MAX_LEN + - (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); + if (*dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) + *dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; return ret; } @@ -530,12 +504,15 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, u8 msg_id, void *buf, size_t size) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *dp = &dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_recv, len; const struct hdcp2_dp_msg_data *hdcp2_msg_data; ktime_t msg_end = ktime_set(0, 0); bool msg_expired; + u32 dev_cnt; hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); if (!hdcp2_msg_data) @@ -546,17 +523,24 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, if (ret < 0) return ret; + hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); + + /* DP adaptation msgs has no msg_id */ + byte++; + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { - ret = get_receiver_id_list_size(dig_port); + ret = get_receiver_id_list_rx_info(dig_port, &dev_cnt, byte); if (ret < 0) return ret; - size = ret; + byte += ret; + size = sizeof(struct hdcp2_rep_send_receiverid_list) - + HDCP_2_2_RXINFO_LEN - HDCP_2_2_RECEIVER_IDS_MAX_LEN + + (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); + offset += HDCP_2_2_RXINFO_LEN; } - bytes_to_recv = size - 1; - /* DP adaptation msgs has no msg_id */ - byte++; + bytes_to_recv = size - 1; while (bytes_to_recv) { len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 
@@ -664,27 +648,6 @@ int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, return 0; } -static -int intel_dp_mst_streams_type1_capable(struct intel_connector *connector, - bool *capable) -{ - struct intel_digital_port *dig_port = intel_attached_dig_port(connector); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - int ret; - bool hdcp_1_x; - - ret = get_rxinfo_hdcp_1_dev_downstream(dig_port, &hdcp_1_x); - if (ret) { - drm_dbg_kms(&i915->drm, - "[%s:%d] failed to read RxInfo ret=%d\n", - connector->base.name, connector->base.base.id, ret); - return ret; - } - - *capable = !hdcp_1_x; - return 0; -} - static const struct intel_hdcp_shim intel_dp_hdcp_shim = { .write_an_aksv = intel_dp_hdcp_write_an_aksv, .read_bksv = intel_dp_hdcp_read_bksv, @@ -833,7 +796,6 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = { .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption, .check_2_2_link = intel_dp_mst_hdcp2_check_link, .hdcp_2_2_capable = intel_dp_hdcp2_capable, - .streams_type1_capable = intel_dp_mst_streams_type1_capable, .protocol = HDCP_PROTOCOL_DP, }; diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 053a3c2f7267..9451f336f28f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -21,20 +21,11 @@ * IN THE SOFTWARE. */ +#include "i915_drv.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" -static void -intel_dp_dump_link_status(struct drm_device *drm, - const u8 link_status[DP_LINK_STATUS_SIZE]) -{ - drm_dbg_kms(drm, - "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", - link_status[0], link_status[1], link_status[2], - link_status[3], link_status[4], link_status[5]); -} - static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) { memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); @@ -66,6 +57,7 @@ static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp, static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); char phy_name[10]; @@ -73,21 +65,22 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp, if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) { drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "failed to read the PHY caps for %s\n", - phy_name); + "[ENCODER:%d:%s][%s] failed to read the PHY caps\n", + encoder->base.base.id, encoder->base.name, phy_name); return; } drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "%s PHY capabilities: %*ph\n", - phy_name, + "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n", + encoder->base.base.id, encoder->base.name, phy_name, (int)sizeof(intel_dp->lttpr_phy_caps[0]), phy_caps); } static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); if (intel_dp_is_edp(intel_dp)) return false; @@ -104,7 +97,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp) goto reset_caps; drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "LTTPR common capabilities: %*ph\n", + "[ENCODER:%d:%s] LTTPR common capabilities: %*ph\n", + encoder->base.base.id, encoder->base.name, 
(int)sizeof(intel_dp->lttpr_common_caps), intel_dp->lttpr_common_caps); @@ -130,6 +124,8 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) static int intel_dp_init_lttpr(struct intel_dp *intel_dp) { + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); int lttpr_count; int i; @@ -161,8 +157,9 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp) return 0; if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) { - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n"); + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n", + encoder->base.base.id, encoder->base.name); intel_dp_set_lttpr_transparent_mode(intel_dp, true); intel_dp_reset_lttpr_count(intel_dp); @@ -301,21 +298,57 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, return preemph_max; } -void -intel_dp_get_adjust_train(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - enum drm_dp_phy dp_phy, - const u8 link_status[DP_LINK_STATUS_SIZE]) +static bool has_per_lane_signal_levels(struct intel_dp *intel_dp, + enum drm_dp_phy dp_phy) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) || + DISPLAY_VER(i915) >= 11; +} + +/* 128b/132b */ +static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, + const u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + u8 tx_ffe = 0; + + if (has_per_lane_signal_levels(intel_dp, dp_phy)) { + lane = min(lane, crtc_state->lane_count - 1); + tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane); + } else { + for (lane = 0; lane < crtc_state->lane_count; lane++) + tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane)); + } + + return tx_ffe; +} + +/* 8b/10b */ +static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, + const u8 link_status[DP_LINK_STATUS_SIZE], + int lane) { u8 v = 0; u8 p = 0; - int lane; u8 voltage_max; u8 preemph_max; - for (lane = 0; lane < crtc_state->lane_count; lane++) { - v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane)); - p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane)); + if (has_per_lane_signal_levels(intel_dp, dp_phy)) { + lane = min(lane, crtc_state->lane_count - 1); + + v = drm_dp_get_adjust_request_voltage(link_status, lane); + p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + } else { + for (lane = 0; lane < crtc_state->lane_count; lane++) { + v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane)); + p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane)); + } } preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy); @@ -328,8 +361,79 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp, if (v >= voltage_max) v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + return v | p; +} + +static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, + const u8 link_status[DP_LINK_STATUS_SIZE], + int lane) +{ + if (intel_dp_is_uhbr(crtc_state)) + return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state, + dp_phy, link_status, lane); + else + return 
intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state, + dp_phy, link_status, lane); +} + +#define TRAIN_REQ_FMT "%d/%d/%d/%d" +#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \ + (drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT) +#define TRAIN_REQ_VSWING_ARGS(link_status) \ + _TRAIN_REQ_VSWING_ARGS(link_status, 0), \ + _TRAIN_REQ_VSWING_ARGS(link_status, 1), \ + _TRAIN_REQ_VSWING_ARGS(link_status, 2), \ + _TRAIN_REQ_VSWING_ARGS(link_status, 3) +#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \ + (drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT) +#define TRAIN_REQ_PREEMPH_ARGS(link_status) \ + _TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \ + _TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \ + _TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \ + _TRAIN_REQ_PREEMPH_ARGS(link_status, 3) +#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \ + drm_dp_get_adjust_tx_ffe_preset((link_status), (lane)) +#define TRAIN_REQ_TX_FFE_ARGS(link_status) \ + _TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \ + _TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \ + _TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \ + _TRAIN_REQ_TX_FFE_ARGS(link_status, 3) + +void +intel_dp_get_adjust_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, + const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + char phy_name[10]; + int lane; + + if (intel_dp_is_uhbr(crtc_state)) { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " + "TX FFE request: " TRAIN_REQ_FMT "\n", + encoder->base.base.id, encoder->base.name, + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + crtc_state->lane_count, + TRAIN_REQ_TX_FFE_ARGS(link_status)); + } else { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, " + "vswing request: " TRAIN_REQ_FMT ", " + "pre-emphasis request: " TRAIN_REQ_FMT "\n", + encoder->base.base.id, encoder->base.name, + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + crtc_state->lane_count, + TRAIN_REQ_VSWING_ARGS(link_status), + TRAIN_REQ_PREEMPH_ARGS(link_status)); + } + for (lane = 0; lane < 4; lane++) - intel_dp->train_set[lane] = v | p; + intel_dp->train_set[lane] = + intel_dp_get_lane_adjust_train(intel_dp, crtc_state, + dp_phy, link_status, lane); } static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp, @@ -351,7 +455,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, int len; intel_dp_program_link_training_pattern(intel_dp, crtc_state, - dp_train_pat); + dp_phy, dp_train_pat); buf[0] = dp_train_pat; /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ @@ -379,40 +483,77 @@ static char dp_training_pattern_name(u8 train_pat) void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, u8 dp_train_pat) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat); + char phy_name[10]; if (train_pat != DP_TRAINING_PATTERN_DISABLE) - drm_dbg_kms(&dev_priv->drm, - "[ENCODER:%d:%s] Using DP training pattern TPS%c\n", + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n", encoder->base.base.id, encoder->base.name, + 
intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), dp_training_pattern_name(train_pat)); intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); } +#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s" +#define _TRAIN_SET_VSWING_ARGS(train_set) \ + ((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \ + (train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : "" +#define TRAIN_SET_VSWING_ARGS(train_set) \ + _TRAIN_SET_VSWING_ARGS((train_set)[0]), \ + _TRAIN_SET_VSWING_ARGS((train_set)[1]), \ + _TRAIN_SET_VSWING_ARGS((train_set)[2]), \ + _TRAIN_SET_VSWING_ARGS((train_set)[3]) +#define _TRAIN_SET_PREEMPH_ARGS(train_set) \ + ((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \ + (train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : "" +#define TRAIN_SET_PREEMPH_ARGS(train_set) \ + _TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \ + _TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \ + _TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \ + _TRAIN_SET_PREEMPH_ARGS((train_set)[3]) +#define _TRAIN_SET_TX_FFE_ARGS(train_set) \ + ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), "" +#define TRAIN_SET_TX_FFE_ARGS(train_set) \ + _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \ + _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \ + _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \ + _TRAIN_SET_TX_FFE_ARGS((train_set)[3]) + void intel_dp_set_signal_levels(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u8 train_set = intel_dp->train_set[0]; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); char phy_name[10]; - drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n", - train_set & DP_TRAIN_VOLTAGE_SWING_MASK, - train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "", - (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> - DP_TRAIN_PRE_EMPHASIS_SHIFT, - train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? - " (max)" : "", - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name))); + if (intel_dp_is_uhbr(crtc_state)) { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, " + "TX FFE presets: " TRAIN_SET_FMT "\n", + encoder->base.base.id, encoder->base.name, + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + crtc_state->lane_count, + TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); + } else { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, " + "vswing levels: " TRAIN_SET_FMT ", " + "pre-emphasis levels: " TRAIN_SET_FMT "\n", + encoder->base.base.id, encoder->base.name, + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + crtc_state->lane_count, + TRAIN_SET_VSWING_ARGS(intel_dp->train_set), + TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); + } if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) - intel_dp->set_signal_levels(intel_dp, crtc_state); + encoder->set_signal_levels(encoder, crtc_state); } static bool @@ -444,15 +585,55 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, return ret == crtc_state->lane_count; } +/* 128b/132b */ +static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane) +{ + return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) == + DP_TX_FFE_PRESET_VALUE_MASK; +} + +/* + * 8b/10b + * + * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to + * have self contradicting tests around this area. 
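/*
 * Worked illustration of the 8b/10b stop rule stated just below, using the
 * standard DP_TRAIN_* encodings from drm_dp_helper.h: with
 * DP_TRAIN_MAX_SWING_REACHED set, vswing 2 + pre-emphasis 1 and
 * vswing 3 + pre-emphasis 0 both satisfy v + p == 3 and terminate the loop,
 * while vswing 2 + pre-emphasis 0 does not. The example_* helper is
 * hypothetical.
 */
static bool example_vswing_2_preemph_1_terminates(void)
{
	u8 train_set_lane = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 |
			    DP_TRAIN_PRE_EMPH_LEVEL_1 |
			    DP_TRAIN_MAX_SWING_REACHED;

	return intel_dp_lane_max_vswing_reached(train_set_lane); /* true: 2 + 1 == 3 */
}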
+ * + * In lieu of better ideas let's just stop when we've reached the max supported + * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on + * whether vswing level 3 is supported or not. + */ +static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane) +{ + u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + + if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0) + return false; + + if (v + p != 3) + return false; + + return true; +} + static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { int lane; - for (lane = 0; lane < crtc_state->lane_count; lane++) - if ((intel_dp->train_set[lane] & - DP_TRAIN_MAX_SWING_REACHED) == 0) - return false; + for (lane = 0; lane < crtc_state->lane_count; lane++) { + u8 train_set_lane = intel_dp->train_set[lane]; + + if (intel_dp_is_uhbr(crtc_state)) { + if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane)) + return false; + } else { + if (!intel_dp_lane_max_vswing_reached(train_set_lane)) + return false; + } + } return true; } @@ -465,7 +646,8 @@ static bool intel_dp_prepare_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); u8 link_config[2]; u8 link_bw, rate_select; @@ -477,10 +659,12 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, if (link_bw) drm_dbg_kms(&i915->drm, - "Using LINK_BW_SET value %02x\n", link_bw); + "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n", + encoder->base.base.id, encoder->base.name, link_bw); else drm_dbg_kms(&i915->drm, - "Using LINK_RATE_SET value %02x\n", rate_select); + "[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n", + encoder->base.base.id, encoder->base.name, rate_select); /* Write the link configuration data */ link_config[0] = link_bw; @@ -495,21 +679,53 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, &rate_select, 1); link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0; - link_config[1] = DP_SET_ANSI_8B10B; + link_config[1] = intel_dp_is_uhbr(crtc_state) ? 
+ DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B; drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); - intel_dp->DP |= DP_PORT_EN; - return true; } -static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp, - enum drm_dp_phy dp_phy) +static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state, + const u8 old_link_status[DP_LINK_STATUS_SIZE], + const u8 new_link_status[DP_LINK_STATUS_SIZE]) { - if (dp_phy == DP_PHY_DPRX) - drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd); - else - drm_dp_lttpr_link_train_clock_recovery_delay(); + int lane; + + for (lane = 0; lane < crtc_state->lane_count; lane++) { + u8 old, new; + + if (intel_dp_is_uhbr(crtc_state)) { + old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane); + new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane); + } else { + old = drm_dp_get_adjust_request_voltage(old_link_status, lane) | + drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane); + new = drm_dp_get_adjust_request_voltage(new_link_status, lane) | + drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane); + } + + if (old != new) + return true; + } + + return false; +} + +static void +intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy, + const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + char phy_name[10]; + + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n", + encoder->base.base.id, encoder->base.name, + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), + link_status[0], link_status[1], link_status[2], + link_status[3], link_status[4], link_status[5]); } /* @@ -521,16 +737,27 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - u8 voltage; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u8 old_link_status[DP_LINK_STATUS_SIZE] = {}; int voltage_tries, cr_tries, max_cr_tries; + u8 link_status[DP_LINK_STATUS_SIZE]; bool max_vswing_reached = false; + char phy_name[10]; + int delay_us; + + delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux, + intel_dp->dpcd, dp_phy, + intel_dp_is_uhbr(crtc_state)); + + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); /* clock recovery */ if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) { - drm_err(&i915->drm, "failed to enable link training\n"); + drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n", + encoder->base.base.id, encoder->base.name, phy_name); return false; } @@ -549,124 +776,124 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, voltage_tries = 1; for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { - u8 link_status[DP_LINK_STATUS_SIZE]; - - intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy); + usleep_range(delay_us, 2 * delay_us); if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { - drm_err(&i915->drm, "failed to get link status\n"); + drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n", + encoder->base.base.id, encoder->base.name, phy_name); return false; } if 
(drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { - drm_dbg_kms(&i915->drm, "clock recovery OK\n"); + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s][%s] Clock recovery OK\n", + encoder->base.base.id, encoder->base.name, phy_name); return true; } if (voltage_tries == 5) { + intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, - "Same voltage tried 5 times\n"); + "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n", + encoder->base.base.id, encoder->base.name, phy_name); return false; } if (max_vswing_reached) { - drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n"); + intel_dp_dump_link_status(intel_dp, dp_phy, link_status); + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n", + encoder->base.base.id, encoder->base.name, phy_name); return false; } - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - /* Update training set as requested by target */ intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, - "failed to update link training\n"); + "[ENCODER:%d:%s][%s] Failed to update link training\n", + encoder->base.base.id, encoder->base.name, phy_name); return false; } - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == - voltage) + if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status)) ++voltage_tries; else voltage_tries = 1; + memcpy(old_link_status, link_status, sizeof(link_status)); + if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state)) max_vswing_reached = true; - } + + intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_err(&i915->drm, - "Failed clock recovery %d times, giving up!\n", max_cr_tries); + "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n", + encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries); + return false; } /* - * Pick training pattern for channel equalization. Training pattern 4 for HBR3 - * or for 1.4 devices that support it, training Pattern 3 for HBR2 - * or 1.2 devices that support it, Training Pattern 2 otherwise. + * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2 + * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or + * 1.2 devices that support it, TPS2 otherwise. */ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); bool source_tps3, sink_tps3, source_tps4, sink_tps4; + /* UHBR+ use separate 128b/132b TPS2 */ + if (intel_dp_is_uhbr(crtc_state)) + return DP_TRAINING_PATTERN_2; + /* - * Intel platforms that support HBR3 also support TPS4. It is mandatory - * for all downstream devices that support HBR3. There are no known eDP - * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 - * specification. + * TPS4 support is mandatory for all downstream devices that + * support HBR3. There are no known eDP panels that support + * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification. * LTTPRs must support TPS4. 
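/*
 * Condensed sketch of the TPS precedence implemented in
 * intel_dp_training_pattern() below. Standalone helper for illustration
 * only; the booleans stand in for the combined source/sink capability
 * checks in that function: 128b/132b TPS2 for UHBR first, then TPS4,
 * then TPS3, with TPS2 as the fallback.
 */
static u32 example_pick_training_pattern(bool uhbr, bool tps4, bool tps3)
{
	if (uhbr)
		return DP_TRAINING_PATTERN_2; /* separate 128b/132b TPS2 */
	if (tps4)
		return DP_TRAINING_PATTERN_4;
	if (tps3)
		return DP_TRAINING_PATTERN_3;
	return DP_TRAINING_PATTERN_2;
}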
*/ - source_tps4 = intel_dp_source_supports_hbr3(intel_dp); + source_tps4 = intel_dp_source_supports_tps4(i915); sink_tps4 = dp_phy != DP_PHY_DPRX || drm_dp_tps4_supported(intel_dp->dpcd); if (source_tps4 && sink_tps4) { return DP_TRAINING_PATTERN_4; } else if (crtc_state->port_clock == 810000) { if (!source_tps4) - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "8.1 Gbps link rate without source HBR3/TPS4 support\n"); + drm_dbg_kms(&i915->drm, + "8.1 Gbps link rate without source TPS4 support\n"); if (!sink_tps4) - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + drm_dbg_kms(&i915->drm, "8.1 Gbps link rate without sink TPS4 support\n"); } + /* - * Intel platforms that support HBR2 also support TPS3. TPS3 support is - * also mandatory for downstream devices that support HBR2. However, not - * all sinks follow the spec. + * TPS3 support is mandatory for downstream devices that + * support HBR2. However, not all sinks follow the spec. */ - source_tps3 = intel_dp_source_supports_hbr2(intel_dp); + source_tps3 = intel_dp_source_supports_tps3(i915); sink_tps3 = dp_phy != DP_PHY_DPRX || drm_dp_tps3_supported(intel_dp->dpcd); if (source_tps3 && sink_tps3) { return DP_TRAINING_PATTERN_3; } else if (crtc_state->port_clock >= 540000) { if (!source_tps3) - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n"); + drm_dbg_kms(&i915->drm, + ">=5.4/6.48 Gbps link rate without source TPS3 support\n"); if (!sink_tps3) - drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + drm_dbg_kms(&i915->drm, ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); } return DP_TRAINING_PATTERN_2; } -static void -intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp, - enum drm_dp_phy dp_phy) -{ - if (dp_phy == DP_PHY_DPRX) { - drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd); - } else { - const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); - - drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps); - } -} - /* * Perform the link training channel equalization phase on the given DP PHY * using one of training pattern 2, 3 or 4 depending on the source and @@ -677,11 +904,20 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); int tries; u32 training_pattern; u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; + char phy_name[10]; + int delay_us; + + delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux, + intel_dp->dpcd, dp_phy, + intel_dp_is_uhbr(crtc_state)); + + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy); /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ @@ -691,35 +927,41 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, training_pattern)) { - drm_err(&i915->drm, "failed to start channel equalization\n"); + drm_err(&i915->drm, + "[ENCODER:%d:%s][%s] Failed to start channel equalization\n", + encoder->base.base.id, encoder->base.name, + phy_name); return false; } for (tries = 0; tries < 5; tries++) { - intel_dp_link_training_channel_equalization_delay(intel_dp, - dp_phy); + usleep_range(delay_us, 2 * delay_us); + if 
(drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { drm_err(&i915->drm, - "failed to get link status\n"); + "[ENCODER:%d:%s][%s] Failed to get link status\n", + encoder->base.base.id, encoder->base.name, phy_name); break; } /* Make sure clock is still ok */ if (!drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) { - intel_dp_dump_link_status(&i915->drm, link_status); + intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, - "Clock recovery check failed, cannot " - "continue channel equalization\n"); + "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot " + "continue channel equalization\n", + encoder->base.base.id, encoder->base.name, phy_name); break; } if (drm_dp_channel_eq_ok(link_status, crtc_state->lane_count)) { channel_eq = true; - drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training " - "successful\n"); + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n", + encoder->base.base.id, encoder->base.name, phy_name); break; } @@ -728,16 +970,18 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, link_status); if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { drm_err(&i915->drm, - "failed to update link training\n"); + "[ENCODER:%d:%s][%s] Failed to update link training\n", + encoder->base.base.id, encoder->base.name, phy_name); break; } } /* Try 5 times, else fail and try at lower BW */ if (tries == 5) { - intel_dp_dump_link_status(&i915->drm, link_status); + intel_dp_dump_link_status(intel_dp, dp_phy, link_status); drm_dbg_kms(&i915->drm, - "Channel equalization failed 5 times\n"); + "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n", + encoder->base.base.id, encoder->base.name, phy_name); } return channel_eq; @@ -774,7 +1018,7 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp, intel_dp->link_trained = true; intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); - intel_dp_program_link_training_pattern(intel_dp, crtc_state, + intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_DISABLE); } @@ -783,7 +1027,8 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy) { - struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; char phy_name[10]; bool ret = false; @@ -797,12 +1042,12 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp, out: drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n", - intel_connector->base.base.id, - intel_connector->base.name, + "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)), ret ? 
"passed" : "failed", - crtc_state->port_clock, crtc_state->lane_count, - intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name))); + crtc_state->port_clock, crtc_state->lane_count); return ret; } @@ -811,10 +1056,13 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; if (intel_dp->hobl_active) { drm_dbg_kms(&dp_to_i915(intel_dp)->drm, - "Link Training failed with HOBL active, not enabling it from now on"); + "[ENCODER:%d:%s] Link Training failed with HOBL active, " + "not enabling it from now on", + encoder->base.base.id, encoder->base.name); intel_dp->hobl_failed = true; } else if (intel_dp_get_link_train_fallback_values(intel_dp, crtc_state->port_clock, @@ -848,7 +1096,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp, } if (ret) - intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); + ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); if (intel_dp->set_idle_link_train) intel_dp->set_idle_link_train(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 9d24d594368c..6a3a7b37349a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -19,6 +19,7 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const u8 link_status[DP_LINK_STATUS_SIZE]); void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy, u8 dp_train_pat); void intel_dp_set_signal_levels(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 8d13d7b26a25..b8bc7d397c81 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -61,7 +61,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, int bpp, slots = -EINVAL; crtc_state->lane_count = limits->max_lane_count; - crtc_state->port_clock = limits->max_clock; + crtc_state->port_clock = limits->max_rate; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { crtc_state->pipe_bpp = bpp; @@ -131,8 +131,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, * for MST we always configure max link bw - the spec doesn't * seem to suggest we should do otherwise. 
*/ - limits.min_clock = - limits.max_clock = intel_dp_max_link_rate(intel_dp); + limits.min_rate = + limits.max_rate = intel_dp_max_link_rate(intel_dp); limits.min_lane_count = limits.max_lane_count = intel_dp_max_lane_count(intel_dp); @@ -231,6 +231,7 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct drm_connector_list_iter connector_list_iter; struct intel_connector *connector_iter; + int ret = 0; if (DISPLAY_VER(dev_priv) < 12) return 0; @@ -243,7 +244,6 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, struct intel_digital_connector_state *conn_iter_state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; - int ret; if (connector_iter->mst_port != connector->mst_port || connector_iter == connector) @@ -252,8 +252,8 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, conn_iter_state = intel_atomic_get_digital_connector_state(state, connector_iter); if (IS_ERR(conn_iter_state)) { - drm_connector_list_iter_end(&connector_list_iter); - return PTR_ERR(conn_iter_state); + ret = PTR_ERR(conn_iter_state); + break; } if (!conn_iter_state->base.crtc) @@ -262,20 +262,18 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, crtc = to_intel_crtc(conn_iter_state->base.crtc); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) { - drm_connector_list_iter_end(&connector_list_iter); - return PTR_ERR(crtc_state); + ret = PTR_ERR(crtc_state); + break; } ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); - if (ret) { - drm_connector_list_iter_end(&connector_list_iter); - return ret; - } + if (ret) + break; crtc_state->uapi.mode_changed = true; } drm_connector_list_iter_end(&connector_list_iter); - return 0; + return ret; } static int @@ -348,16 +346,6 @@ static void wait_for_act_sent(struct intel_encoder *encoder, drm_dp_check_act_status(&intel_dp->mst_mgr); } -static void intel_mst_pre_disable_dp(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - if (old_crtc_state->has_audio) - intel_audio_codec_disable(encoder, old_crtc_state, - old_conn_state); -} - static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -378,10 +366,13 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); if (ret) { drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); } + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); } static void intel_mst_post_disable_dp(struct intel_atomic_state *state, @@ -396,7 +387,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); bool last_mst_stream; - u32 val; intel_dp->active_mst_links--; last_mst_stream = intel_dp->active_mst_links == 0; @@ -406,18 +396,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_crtc_vblank_off(old_crtc_state); - intel_disable_pipe(old_crtc_state); + intel_disable_transcoder(old_crtc_state); 
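
The intel_dp_mst_atomic_master_trans_check() hunk above is a classic iterator-cleanup refactor: three early returns, each of which had to remember to call drm_connector_list_iter_end() itself, become break statements that funnel into a single cleanup point. A minimal sketch of the pattern, assuming a hypothetical do_check() helper:

	static int check_all_connectors(struct drm_device *dev)
	{
		struct drm_connector_list_iter iter;
		struct drm_connector *connector;
		int ret = 0;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			ret = do_check(connector);	/* hypothetical helper */
			if (ret)
				break;	/* every failure path exits here */
		}
		drm_connector_list_iter_end(&iter);	/* always balanced with _begin() */

		return ret;
	}

This keeps the begin/end pairing visible in one place and makes it impossible to add a new failure path that forgets the cleanup.
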
drm_dp_update_payload_part2(&intel_dp->mst_mgr); clear_act_sent(encoder, old_crtc_state); - val = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder)); - val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), - val); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), + TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0); wait_for_act_sent(encoder, old_crtc_state); @@ -523,7 +509,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_dp->active_mst_links++; - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); /* * Before Gen 12 this is not done as part of @@ -555,6 +541,17 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, clear_act_sent(encoder, pipe_config); + if (intel_dp_is_uhbr(pipe_config)) { + const struct drm_display_mode *adjusted_mode = + &pipe_config->hw.adjusted_mode; + u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock); + + intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder), + TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24)); + intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder), + TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff)); + } + intel_ddi_enable_transcoder_func(encoder, pipe_config); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0, @@ -571,7 +568,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0, FECSTALL_DIS_DPTSTREAM_DPTTG); - intel_enable_pipe(pipe_config); + intel_enable_transcoder(pipe_config); intel_crtc_vblank_on(pipe_config); @@ -910,7 +907,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe intel_encoder->compute_config = intel_dp_mst_compute_config; intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; - intel_encoder->pre_disable = intel_mst_pre_disable_dp; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; intel_encoder->update_pipe = intel_ddi_update_pipe; @@ -971,24 +967,31 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) dig_port->max_lanes, max_source_rate, conn_base_id); - if (ret) + if (ret) { + intel_dp->mst_mgr.cbs = NULL; return ret; - - intel_dp->can_mst = true; + } return 0; } +bool intel_dp_mst_source_support(struct intel_dp *intel_dp) +{ + return intel_dp->mst_mgr.cbs; +} + void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port) { struct intel_dp *intel_dp = &dig_port->dp; - if (!intel_dp->can_mst) + if (!intel_dp_mst_source_support(intel_dp)) return; drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); /* encoders will get killed by normal cleanup */ + + intel_dp->mst_mgr.cbs = NULL; } bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index 6afda4e86b3c..f7301de6cdfb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -8,13 +8,15 @@ #include <linux/types.h> -struct intel_digital_port; struct intel_crtc_state; +struct intel_digital_port; +struct intel_dp; int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port); int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port); bool 
intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state); bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state); +bool intel_dp_mst_source_support(struct intel_dp *intel_dp); #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 48507ed79950..44edeb2e55c0 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -21,12 +21,13 @@ * DEALINGS IN THE SOFTWARE. */ -#include "display/intel_dp.h" - +#include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dp.h" #include "intel_dpio_phy.h" -#include "intel_sideband.h" +#include "vlv_sideband.h" /** * DOC: DPIO @@ -266,15 +267,22 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, *ch = DPIO_CH0; } -void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, - enum port port, u32 margin, u32 scale, - u32 enable, u32 deemphasis) +void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - u32 val; - enum dpio_phy phy; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + int level = intel_ddi_level(encoder, crtc_state, 0); + const struct intel_ddi_buf_trans *trans; enum dpio_channel ch; + enum dpio_phy phy; + int n_entries; + u32 val; - bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + return; + + bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch); /* * While we write to the group register to program all lanes at once we @@ -286,12 +294,13 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch)); val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE); - val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT; + val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT | + trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT; intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val); val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch)); val &= ~SCALE_DCOMP_METHOD; - if (enable) + if (trans->entries[level].bxt.enable) val |= SCALE_DCOMP_METHOD; if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD)) @@ -302,7 +311,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch)); val &= ~DE_EMPHASIS; - val |= deemphasis << DEEMPH_SHIFT; + val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT; intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val); val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch)); diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h index 6473440e7457..9c3d008e8e1a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h @@ -17,9 +17,8 @@ struct intel_encoder; void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, enum dpio_phy *phy, enum dpio_channel *ch); -void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, - enum port port, u32 margin, u32 scale, - u32 enable, u32 deemphasis); +void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void 
bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy); void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy); bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c index 14515e62c05e..1ce0c171f4fb 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.c +++ b/drivers/gpu/drm/i915/display/intel_dpll.c @@ -2,16 +2,19 @@ /* * Copyright © 2020 Intel Corporation */ + #include <linux/kernel.h> + #include "intel_crtc.h" #include "intel_de.h" -#include "intel_display_types.h" #include "intel_display.h" +#include "intel_display_types.h" #include "intel_dpll.h" #include "intel_lvds.h" #include "intel_panel.h" -#include "intel_sideband.h" -#include "display/intel_snps_phy.h" +#include "intel_pps.h" +#include "intel_snps_phy.h" +#include "vlv_sideband.h" struct intel_limit { struct { @@ -309,7 +312,7 @@ int pnv_calc_dpll_params(int refclk, struct dpll *clock) return clock->dot; } -static u32 i9xx_dpll_compute_m(struct dpll *dpll) +static u32 i9xx_dpll_compute_m(const struct dpll *dpll) { return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } @@ -428,7 +431,8 @@ i9xx_select_p2_div(const struct intel_limit *limit, static bool i9xx_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, struct dpll *match_clock, + int target, int refclk, + const struct dpll *match_clock, struct dpll *best_clock) { struct drm_device *dev = crtc_state->uapi.crtc->dev; @@ -486,7 +490,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit, static bool pnv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, struct dpll *match_clock, + int target, int refclk, + const struct dpll *match_clock, struct dpll *best_clock) { struct drm_device *dev = crtc_state->uapi.crtc->dev; @@ -542,7 +547,8 @@ pnv_find_best_dpll(const struct intel_limit *limit, static bool g4x_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, struct dpll *match_clock, + int target, int refclk, + const struct dpll *match_clock, struct dpll *best_clock) { struct drm_device *dev = crtc_state->uapi.crtc->dev; @@ -636,7 +642,8 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, static bool vlv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, struct dpll *match_clock, + int target, int refclk, + const struct dpll *match_clock, struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -696,7 +703,8 @@ vlv_find_best_dpll(const struct intel_limit *limit, static bool chv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, struct dpll *match_clock, + int target, int refclk, + const struct dpll *match_clock, struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -763,47 +771,45 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, NULL, best_clock); } -static u32 pnv_dpll_compute_fp(struct dpll *dpll) +u32 i9xx_dpll_compute_fp(const struct dpll *dpll) +{ + return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; +} + +static u32 pnv_dpll_compute_fp(const struct dpll *dpll) { return (1 << dpll->n) << 16 | dpll->m2; } -static void i9xx_update_pll_dividers(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct dpll *reduced_clock) 
+static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 fp, fp2 = 0; + u32 fp, fp2; if (IS_PINEVIEW(dev_priv)) { - fp = pnv_dpll_compute_fp(&crtc_state->dpll); - if (reduced_clock) - fp2 = pnv_dpll_compute_fp(reduced_clock); + fp = pnv_dpll_compute_fp(clock); + fp2 = pnv_dpll_compute_fp(reduced_clock); } else { - fp = i9xx_dpll_compute_fp(&crtc_state->dpll); - if (reduced_clock) - fp2 = i9xx_dpll_compute_fp(reduced_clock); + fp = i9xx_dpll_compute_fp(clock); + fp2 = i9xx_dpll_compute_fp(reduced_clock); } crtc_state->dpll_hw_state.fp0 = fp; - - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && - reduced_clock) { - crtc_state->dpll_hw_state.fp1 = fp2; - } else { - crtc_state->dpll_hw_state.fp1 = fp; - } + crtc_state->dpll_hw_state.fp1 = fp2; } -static void i9xx_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct dpll *reduced_clock) +static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; - struct dpll *clock = &crtc_state->dpll; - i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); + i9xx_update_pll_dividers(crtc_state, clock, reduced_clock); dpll = DPLL_VGA_MODE_DIS; @@ -826,13 +832,17 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ - if (IS_PINEVIEW(dev_priv)) + if (IS_G4X(dev_priv)) { + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + } else if (IS_PINEVIEW(dev_priv)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; - else { + WARN_ON(reduced_clock->p1 != clock->p1); + } else { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - if (IS_G4X(dev_priv) && reduced_clock) - dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + WARN_ON(reduced_clock->p1 != clock->p1); } + switch (clock->p2) { case 5: dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; @@ -847,6 +857,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } + WARN_ON(reduced_clock->p2 != clock->p2); + if (DISPLAY_VER(dev_priv) >= 4) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); @@ -868,16 +880,15 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, } } -static void i8xx_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct dpll *reduced_clock) +static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; - struct dpll *clock = &crtc_state->dpll; - i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); + i9xx_update_pll_dividers(crtc_state, clock, reduced_clock); dpll = DPLL_VGA_MODE_DIS; @@ -891,6 +902,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, if (clock->p2 == 4) dpll |= PLL_P2_DIVIDE_BY_4; } + WARN_ON(reduced_clock->p1 != clock->p1); + 
WARN_ON(reduced_clock->p2 != clock->p2); /* * Bspec: @@ -918,42 +931,44 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, crtc_state->dpll_hw_state.dpll = dpll; } -static int hsw_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - if (IS_DG2(dev_priv)) { + if (IS_DG2(dev_priv)) return intel_mpllb_calc_state(crtc_state, encoder); - } else if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || - DISPLAY_VER(dev_priv) >= 11) { - if (!intel_reserve_shared_dplls(state, crtc, encoder)) { - drm_dbg_kms(&dev_priv->drm, - "failed to find PLL for pipe %c\n", - pipe_name(crtc->pipe)); - return -EINVAL; - } + + if (DISPLAY_VER(dev_priv) < 11 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + return 0; + + if (!intel_reserve_shared_dplls(state, crtc, encoder)) { + drm_dbg_kms(&dev_priv->drm, + "failed to find PLL for pipe %c\n", + pipe_name(crtc->pipe)); + return -EINVAL; } return 0; } -static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor) +static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor) { - return i9xx_dpll_compute_m(dpll) < factor * dpll->n; + return dpll->m < factor * dpll->n; } - -static void ilk_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct dpll *reduced_clock) +static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 dpll, fp, fp2; + u32 fp, fp2; int factor; /* Enable autotuning of the PLL clock (if permissible) */ @@ -968,19 +983,27 @@ static void ilk_compute_dpll(struct intel_crtc *crtc, factor = 20; } - fp = i9xx_dpll_compute_fp(&crtc_state->dpll); - - if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor)) + fp = i9xx_dpll_compute_fp(clock); + if (ilk_needs_fb_cb_tune(clock, factor)) fp |= FP_CB_TUNE; - if (reduced_clock) { - fp2 = i9xx_dpll_compute_fp(reduced_clock); + fp2 = i9xx_dpll_compute_fp(reduced_clock); + if (ilk_needs_fb_cb_tune(reduced_clock, factor)) + fp2 |= FP_CB_TUNE; - if (reduced_clock->m < factor * reduced_clock->n) - fp2 |= FP_CB_TUNE; - } else { - fp2 = fp; - } + crtc_state->dpll_hw_state.fp0 = fp; + crtc_state->dpll_hw_state.fp1 = fp2; +} + +static void ilk_compute_dpll(struct intel_crtc_state *crtc_state, + const struct dpll *clock, + const struct dpll *reduced_clock) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 dpll; + + ilk_update_pll_dividers(crtc_state, clock, reduced_clock); dpll = 0; @@ -1018,11 +1041,11 @@ static void ilk_compute_dpll(struct intel_crtc *crtc, dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ - dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; + dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; /* also FPA1 */ - dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; + dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; - switch (crtc_state->dpll.p2) { + switch (clock->p2) { case 5: 
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; break; @@ -1036,6 +1059,7 @@ static void ilk_compute_dpll(struct intel_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } + WARN_ON(reduced_clock->p2 != clock->p2); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv)) @@ -1046,13 +1070,11 @@ static void ilk_compute_dpll(struct intel_crtc *crtc, dpll |= DPLL_VCO_ENABLE; crtc_state->dpll_hw_state.dpll = dpll; - crtc_state->dpll_hw_state.fp0 = fp; - crtc_state->dpll_hw_state.fp1 = fp2; } -static int ilk_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); @@ -1097,7 +1119,8 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - ilk_compute_dpll(crtc, crtc_state, NULL); + ilk_compute_dpll(crtc_state, &crtc_state->dpll, + &crtc_state->dpll); if (!intel_reserve_shared_dplls(state, crtc, NULL)) { drm_dbg_kms(&dev_priv->drm, @@ -1109,41 +1132,42 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc, return 0; } -void vlv_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void vlv_compute_dpll(struct intel_crtc_state *crtc_state) { - pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (crtc->pipe != PIPE_A) - pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; + crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ - if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) - pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV; - pipe_config->dpll_hw_state.dpll_md = - (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + crtc_state->dpll_hw_state.dpll_md = + (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } -void chv_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void chv_compute_dpll(struct intel_crtc_state *crtc_state) { - pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (crtc->pipe != PIPE_A) - pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; + crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; /* DPLL not used with DSI, but still need the rest set up */ - if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) - pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; - pipe_config->dpll_hw_state.dpll_md = - (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; + crtc_state->dpll_hw_state.dpll_md = + (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } -static int chv_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int 
chv_crtc_compute_clock(struct intel_crtc_state *crtc_state) { int refclk = 100000; const struct intel_limit *limit = &intel_limits_chv; @@ -1159,13 +1183,12 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - chv_compute_dpll(crtc, crtc_state); + chv_compute_dpll(crtc_state); return 0; } -static int vlv_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state) { int refclk = 100000; const struct intel_limit *limit = &intel_limits_vlv; @@ -1181,14 +1204,14 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - vlv_compute_dpll(crtc, crtc_state); + vlv_compute_dpll(crtc_state); return 0; } -static int g4x_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 96000; @@ -1226,16 +1249,16 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - i9xx_compute_dpll(crtc, crtc_state, NULL); + i9xx_compute_dpll(crtc_state, &crtc_state->dpll, + &crtc_state->dpll); return 0; } -static int pnv_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 96000; @@ -1263,16 +1286,16 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - i9xx_compute_dpll(crtc, crtc_state, NULL); + i9xx_compute_dpll(crtc_state, &crtc_state->dpll, + &crtc_state->dpll); return 0; } -static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 96000; @@ -1300,16 +1323,16 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - i9xx_compute_dpll(crtc, crtc_state, NULL); + i9xx_compute_dpll(crtc_state, &crtc_state->dpll, + &crtc_state->dpll); return 0; } -static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 48000; @@ -1339,30 +1362,63 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, return -EINVAL; } - i8xx_compute_dpll(crtc, crtc_state, NULL); + i8xx_compute_dpll(crtc_state, &crtc_state->dpll, + &crtc_state->dpll); return 0; } +static const struct intel_dpll_funcs hsw_dpll_funcs = { + .crtc_compute_clock = hsw_crtc_compute_clock, +}; + +static const struct intel_dpll_funcs ilk_dpll_funcs = { + 
.crtc_compute_clock = ilk_crtc_compute_clock, +}; + +static const struct intel_dpll_funcs chv_dpll_funcs = { + .crtc_compute_clock = chv_crtc_compute_clock, +}; + +static const struct intel_dpll_funcs vlv_dpll_funcs = { + .crtc_compute_clock = vlv_crtc_compute_clock, +}; + +static const struct intel_dpll_funcs g4x_dpll_funcs = { + .crtc_compute_clock = g4x_crtc_compute_clock, +}; + +static const struct intel_dpll_funcs pnv_dpll_funcs = { + .crtc_compute_clock = pnv_crtc_compute_clock, +}; + +static const struct intel_dpll_funcs i9xx_dpll_funcs = { + .crtc_compute_clock = i9xx_crtc_compute_clock, +}; + +static const struct intel_dpll_funcs i8xx_dpll_funcs = { + .crtc_compute_clock = i8xx_crtc_compute_clock, +}; + void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv) { if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv)) - dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock; + dev_priv->dpll_funcs = &hsw_dpll_funcs; else if (HAS_PCH_SPLIT(dev_priv)) - dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock; + dev_priv->dpll_funcs = &ilk_dpll_funcs; else if (IS_CHERRYVIEW(dev_priv)) - dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; + dev_priv->dpll_funcs = &chv_dpll_funcs; else if (IS_VALLEYVIEW(dev_priv)) - dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; + dev_priv->dpll_funcs = &vlv_dpll_funcs; else if (IS_G4X(dev_priv)) - dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; + dev_priv->dpll_funcs = &g4x_dpll_funcs; else if (IS_PINEVIEW(dev_priv)) - dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; + dev_priv->dpll_funcs = &pnv_dpll_funcs; else if (DISPLAY_VER(dev_priv) != 2) - dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; + dev_priv->dpll_funcs = &i9xx_dpll_funcs; else - dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; + dev_priv->dpll_funcs = &i8xx_dpll_funcs; } static bool i9xx_has_pps(struct drm_i915_private *dev_priv) @@ -1373,34 +1429,37 @@ static bool i9xx_has_pps(struct drm_i915_private *dev_priv) return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); } -void i9xx_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state) +void i9xx_enable_pll(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - i915_reg_t reg = DPLL(crtc->pipe); u32 dpll = crtc_state->dpll_hw_state.dpll; + enum pipe pipe = crtc->pipe; int i; - assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); + assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); /* PLL is protected by panel, make sure we can write it */ if (i9xx_has_pps(dev_priv)) - assert_panel_unlocked(dev_priv, crtc->pipe); + assert_pps_unlocked(dev_priv, pipe); + + intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0); + intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS); - intel_de_write(dev_priv, reg, dpll); + intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); + intel_de_write(dev_priv, DPLL(pipe), dpll); /* Wait for the clocks to stabilize. 
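
The intel_dpll_funcs tables above replace a per-device writable function pointer (dev_priv->display.crtc_compute_clock) with a pointer to a const, per-platform ops structure chosen once at init time. In outline (the call site shown is an assumption based on how such hook tables are normally used, not a hunk from this diff):

	struct intel_dpll_funcs {
		int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
	};

	static const struct intel_dpll_funcs i8xx_dpll_funcs = {
		.crtc_compute_clock = i8xx_crtc_compute_clock,
	};

	/* init: pick one table per platform */
	dev_priv->dpll_funcs = &i8xx_dpll_funcs;

	/* caller: indirect through the const table */
	ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);

Because the tables are const they live in read-only data, and adding a second hook later only touches the struct definition and the per-platform tables rather than every init path.
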
*/ - intel_de_posting_read(dev_priv, reg); + intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); if (DISPLAY_VER(dev_priv) >= 4) { - intel_de_write(dev_priv, DPLL_MD(crtc->pipe), + intel_de_write(dev_priv, DPLL_MD(pipe), crtc_state->dpll_hw_state.dpll_md); } else { /* The pixel multiplier can only be updated once the @@ -1408,13 +1467,13 @@ void i9xx_enable_pll(struct intel_crtc *crtc, * * So write it again. */ - intel_de_write(dev_priv, reg, dpll); + intel_de_write(dev_priv, DPLL(pipe), dpll); } /* We do this three times for luck */ for (i = 0; i < 3; i++) { - intel_de_write(dev_priv, reg, dpll); - intel_de_posting_read(dev_priv, reg); + intel_de_write(dev_priv, DPLL(pipe), dpll); + intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); /* wait for warmup */ } } @@ -1448,136 +1507,22 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); } -static void _vlv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); - intel_de_posting_read(dev_priv, DPLL(pipe)); - udelay(150); - - if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) - drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe); -} - -void vlv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); - - /* PLL is protected by panel, make sure we can write it */ - assert_panel_unlocked(dev_priv, pipe); - - if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) - _vlv_enable_pll(crtc, pipe_config); - - intel_de_write(dev_priv, DPLL_MD(pipe), - pipe_config->dpll_hw_state.dpll_md); - intel_de_posting_read(dev_priv, DPLL_MD(pipe)); -} - - -static void _chv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - enum dpio_channel port = vlv_pipe_to_channel(pipe); - u32 tmp; - - vlv_dpio_get(dev_priv); - - /* Enable back the 10bit clock to display controller */ - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); - tmp |= DPIO_DCLKP_EN; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); - - vlv_dpio_put(dev_priv); - - /* - * Need to wait > 100ns between dclkp clock enable bit and PLL enable. - */ - udelay(1); - - /* Enable PLL */ - intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); - - /* Check PLL is locked */ - if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) - drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); -} - -void chv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) +static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - - assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); - - /* PLL is protected by panel, make sure we can write it */ - assert_panel_unlocked(dev_priv, pipe); - - if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) - _chv_enable_pll(crtc, pipe_config); - - if (pipe != PIPE_A) { - /* - * WaPixelRepeatModeFixForC0:chv - * - * DPLLCMD is AWOL. 
Use chicken bits to propagate - * the value from DPLLBMD to either pipe B or C. - */ - intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); - intel_de_write(dev_priv, DPLL_MD(PIPE_B), - pipe_config->dpll_hw_state.dpll_md); - intel_de_write(dev_priv, CBR4_VLV, 0); - dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; - - /* - * DPLLB VGA mode also seems to cause problems. - * We should always have it disabled. - */ - drm_WARN_ON(&dev_priv->drm, - (intel_de_read(dev_priv, DPLL(PIPE_B)) & - DPLL_VGA_MODE_DIS) == 0); - } else { - intel_de_write(dev_priv, DPLL_MD(pipe), - pipe_config->dpll_hw_state.dpll_md); - intel_de_posting_read(dev_priv, DPLL_MD(pipe)); - } -} - -void vlv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; u32 mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; u32 coreclk, reg_val; - /* Enable Refclk */ - intel_de_write(dev_priv, DPLL(pipe), - pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); - - /* No need to actually set up the DPLL with DSI */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) - return; - vlv_dpio_get(dev_priv); - bestn = pipe_config->dpll.n; - bestm1 = pipe_config->dpll.m1; - bestm2 = pipe_config->dpll.m2; - bestp1 = pipe_config->dpll.p1; - bestp2 = pipe_config->dpll.p2; + bestn = crtc_state->dpll.n; + bestm1 = crtc_state->dpll.m1; + bestm2 = crtc_state->dpll.m2; + bestp1 = crtc_state->dpll.p1; + bestp2 = crtc_state->dpll.p2; /* See eDP HDMI DPIO driver vbios notes doc */ @@ -1614,16 +1559,16 @@ void vlv_prepare_pll(struct intel_crtc *crtc, vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); /* Set HBR and RBR LPF coefficients */ - if (pipe_config->port_clock == 162000 || - intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || - intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) + if (crtc_state->port_clock == 162000 || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) || + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x009f0003); else vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 0x00d0000f); - if (intel_crtc_has_dp_encoder(pipe_config)) { + if (intel_crtc_has_dp_encoder(crtc_state)) { /* Use SSC source */ if (pipe == PIPE_A) vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), @@ -1643,7 +1588,7 @@ void vlv_prepare_pll(struct intel_crtc *crtc, coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; - if (intel_crtc_has_dp_encoder(pipe_config)) + if (intel_crtc_has_dp_encoder(crtc_state)) coreclk |= 0x01000000; vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); @@ -1652,11 +1597,50 @@ void vlv_prepare_pll(struct intel_crtc *crtc, vlv_dpio_put(dev_priv); } -void chv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config) +static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll); + intel_de_posting_read(dev_priv, DPLL(pipe)); + udelay(150); + + if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) + drm_err(&dev_priv->drm, 
"DPLL %d failed to lock\n", pipe); +} + +void vlv_enable_pll(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); + + /* PLL is protected by panel, make sure we can write it */ + assert_pps_unlocked(dev_priv, pipe); + + /* Enable Refclk */ + intel_de_write(dev_priv, DPLL(pipe), + crtc_state->dpll_hw_state.dpll & + ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); + + if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) { + vlv_prepare_pll(crtc_state); + _vlv_enable_pll(crtc_state); + } + + intel_de_write(dev_priv, DPLL_MD(pipe), + crtc_state->dpll_hw_state.dpll_md); + intel_de_posting_read(dev_priv, DPLL_MD(pipe)); +} + +static void chv_prepare_pll(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); u32 loopfilter, tribuf_calcntr; @@ -1664,21 +1648,13 @@ void chv_prepare_pll(struct intel_crtc *crtc, u32 dpio_val; int vco; - /* Enable Refclk and SSC */ - intel_de_write(dev_priv, DPLL(pipe), - pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); - - /* No need to actually set up the DPLL with DSI */ - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) - return; - - bestn = pipe_config->dpll.n; - bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; - bestm1 = pipe_config->dpll.m1; - bestm2 = pipe_config->dpll.m2 >> 22; - bestp1 = pipe_config->dpll.p1; - bestp2 = pipe_config->dpll.p2; - vco = pipe_config->dpll.vco; + bestn = crtc_state->dpll.n; + bestm2_frac = crtc_state->dpll.m2 & 0x3fffff; + bestm1 = crtc_state->dpll.m1; + bestm2 = crtc_state->dpll.m2 >> 22; + bestp1 = crtc_state->dpll.p1; + bestp2 = crtc_state->dpll.p2; + vco = crtc_state->dpll.vco; dpio_val = 0; loopfilter = 0; @@ -1757,6 +1733,83 @@ void chv_prepare_pll(struct intel_crtc *crtc, vlv_dpio_put(dev_priv); } +static void _chv_enable_pll(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + enum dpio_channel port = vlv_pipe_to_channel(pipe); + u32 tmp; + + vlv_dpio_get(dev_priv); + + /* Enable back the 10bit clock to display controller */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); + tmp |= DPIO_DCLKP_EN; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); + + vlv_dpio_put(dev_priv); + + /* + * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 
+ */ + udelay(1); + + /* Enable PLL */ + intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll); + + /* Check PLL is locked */ + if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) + drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); +} + +void chv_enable_pll(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); + + /* PLL is protected by panel, make sure we can write it */ + assert_pps_unlocked(dev_priv, pipe); + + /* Enable Refclk and SSC */ + intel_de_write(dev_priv, DPLL(pipe), + crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); + + if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) { + chv_prepare_pll(crtc_state); + _chv_enable_pll(crtc_state); + } + + if (pipe != PIPE_A) { + /* + * WaPixelRepeatModeFixForC0:chv + * + * DPLLCMD is AWOL. Use chicken bits to propagate + * the value from DPLLBMD to either pipe B or C. + */ + intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); + intel_de_write(dev_priv, DPLL_MD(PIPE_B), + crtc_state->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, CBR4_VLV, 0); + dev_priv->chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md; + + /* + * DPLLB VGA mode also seems to cause problems. + * We should always have it disabled. + */ + drm_WARN_ON(&dev_priv->drm, + (intel_de_read(dev_priv, DPLL(PIPE_B)) & + DPLL_VGA_MODE_DIS) == 0); + } else { + intel_de_write(dev_priv, DPLL_MD(pipe), + crtc_state->dpll_hw_state.dpll_md); + intel_de_posting_read(dev_priv, DPLL_MD(pipe)); + } +} + /** * vlv_force_pll_on - forcibly enable just the PLL * @dev_priv: i915 private structure @@ -1770,28 +1823,27 @@ void chv_prepare_pll(struct intel_crtc *crtc, int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - struct intel_crtc_state *pipe_config; + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc_state *crtc_state; - pipe_config = intel_crtc_state_alloc(crtc); - if (!pipe_config) + crtc_state = intel_crtc_state_alloc(crtc); + if (!crtc_state) return -ENOMEM; - pipe_config->cpu_transcoder = (enum transcoder)pipe; - pipe_config->pixel_multiplier = 1; - pipe_config->dpll = *dpll; + crtc_state->cpu_transcoder = (enum transcoder)pipe; + crtc_state->pixel_multiplier = 1; + crtc_state->dpll = *dpll; + crtc_state->output_types = BIT(INTEL_OUTPUT_EDP); if (IS_CHERRYVIEW(dev_priv)) { - chv_compute_dpll(crtc, pipe_config); - chv_prepare_pll(crtc, pipe_config); - chv_enable_pll(crtc, pipe_config); + chv_compute_dpll(crtc_state); + chv_enable_pll(crtc_state); } else { - vlv_compute_dpll(crtc, pipe_config); - vlv_prepare_pll(crtc, pipe_config); - vlv_enable_pll(crtc, pipe_config); + vlv_compute_dpll(crtc_state); + vlv_enable_pll(crtc_state); } - kfree(pipe_config); + kfree(crtc_state); return 0; } @@ -1801,7 +1853,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) u32 val; /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, (enum transcoder)pipe); + assert_transcoder_disabled(dev_priv, (enum transcoder)pipe); val = DPLL_INTEGRATED_REF_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; @@ -1818,7 +1870,7 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) u32 val; /* Make sure the pipe isn't still 
relying on us */ - assert_pipe_disabled(dev_priv, (enum transcoder)pipe); + assert_transcoder_disabled(dev_priv, (enum transcoder)pipe); val = DPLL_SSC_REF_CLK_CHV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; @@ -1849,7 +1901,7 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) return; /* Make sure the pipe isn't still relying on us */ - assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); + assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder); intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); intel_de_posting_read(dev_priv, DPLL(pipe)); @@ -1871,3 +1923,25 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) else vlv_disable_pll(dev_priv, pipe); } + +/* Only for pre-ILK configs */ +static void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + bool cur_state; + + cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE; + I915_STATE_WARN(cur_state != state, + "PLL state assertion failure (expected %s, current %s)\n", + onoff(state), onoff(cur_state)); +} + +void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_pll(i915, pipe, true); +} + +void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_pll(i915, pipe, false); +} diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h index 88247027fd5a..1af0ac43cca4 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll.h +++ b/drivers/gpu/drm/i915/display/intel_dpll.h @@ -18,29 +18,25 @@ void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv); int vlv_calc_dpll_params(int refclk, struct dpll *clock); int pnv_calc_dpll_params(int refclk, struct dpll *clock); int i9xx_calc_dpll_params(int refclk, struct dpll *clock); -void vlv_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -void chv_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); +u32 i9xx_dpll_compute_fp(const struct dpll *dpll); +void vlv_compute_dpll(struct intel_crtc_state *crtc_state); +void chv_compute_dpll(struct intel_crtc_state *crtc_state); int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll); void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); -void i9xx_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state); -void vlv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config); -void chv_enable_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config); -void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); + +void chv_enable_pll(const struct intel_crtc_state *crtc_state); void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); +void vlv_enable_pll(const struct intel_crtc_state *crtc_state); +void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe); +void i9xx_enable_pll(const struct intel_crtc_state *crtc_state); void i9xx_disable_pll(const struct intel_crtc_state *crtc_state); -void vlv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config); -void chv_prepare_pll(struct intel_crtc *crtc, - const struct intel_crtc_state *pipe_config); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock); int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); +void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe); +void assert_pll_disabled(struct drm_i915_private 
*i915, enum pipe pipe); + #endif diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 5c91d125a337..fc8fda77483a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -26,6 +26,8 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" +#include "intel_pch_refclk.h" +#include "intel_tc.h" /** * DOC: Display PLLs @@ -185,34 +187,6 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915, } /** - * intel_prepare_shared_dpll - call a dpll's prepare hook - * @crtc_state: CRTC, and its state, which has a shared dpll - * - * This calls the PLL's prepare hook if it has one and if the PLL is not - * already enabled. The prepare hook is platform specific. - */ -void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_shared_dpll *pll = crtc_state->shared_dpll; - - if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) - return; - - mutex_lock(&dev_priv->dpll.lock); - drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask); - if (!pll->active_mask) { - drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name); - drm_WARN_ON(&dev_priv->drm, pll->on); - assert_shared_dpll_disabled(dev_priv, pll); - - pll->info->funcs->prepare(dev_priv, pll); - } - mutex_unlock(&dev_priv->dpll.lock); -} - -/** * intel_enable_shared_dpll - enable a CRTC's shared DPLL * @crtc_state: CRTC, and its state, which has a shared DPLL * @@ -451,15 +425,6 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, return val & DPLL_VCO_ENABLE; } -static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - const enum intel_dpll_id id = pll->info->id; - - intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0); - intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1); -} - static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) { u32 val; @@ -481,6 +446,9 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, /* PCH refclock must be enabled first */ ibx_assert_pch_refclk_enabled(dev_priv); + intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0); + intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1); + intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll); /* Wait for the clocks to stabilize. */ @@ -558,7 +526,6 @@ static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, } static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { - .prepare = ibx_pch_dpll_prepare, .enable = ibx_pch_dpll_enable, .disable = ibx_pch_dpll_disable, .get_hw_state = ibx_pch_dpll_get_hw_state, @@ -3136,8 +3103,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state, enc_to_dig_port(encoder); if (primary_port && - (primary_port->tc_mode == TC_PORT_DP_ALT || - primary_port->tc_mode == TC_PORT_LEGACY)) + (intel_tc_port_in_dp_alt_mode(primary_port) || + intel_tc_port_in_legacy_mode(primary_port))) port_dpll_id = ICL_PORT_DPLL_MG_PHY; icl_set_active_port_dpll(crtc_state, port_dpll_id); @@ -3774,7 +3741,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv, * domain. 
*/ pll->wakeref = intel_display_power_get(dev_priv, - POWER_DOMAIN_DPLL_DC_OFF); + POWER_DOMAIN_DC_OFF); } icl_pll_power_enable(dev_priv, pll, enable_reg); @@ -3881,7 +3848,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv, if (IS_JSL_EHL(dev_priv) && pll->info->id == DPLL_ID_EHL_DPLL4) - intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, + intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, pll->wakeref); } @@ -4265,7 +4232,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, if (IS_JSL_EHL(i915) && pll->on && pll->info->id == DPLL_ID_EHL_DPLL4) { pll->wakeref = intel_display_power_get(i915, - POWER_DOMAIN_DPLL_DC_OFF); + POWER_DOMAIN_DC_OFF); } pll->state.pipe_mask = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 30e0aa5ca109..ef2889753807 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -27,7 +27,6 @@ #include <linux/types.h> -#include "intel_display.h" #include "intel_wakeref.h" /*FIXME: Move this to a more appropriate place. */ @@ -37,6 +36,7 @@ (void) (&__a == &__b); \ __a > __b ? (__a - __b) : (__b - __a); }) +enum tc_port; struct drm_device; struct drm_i915_private; struct intel_atomic_state; @@ -256,16 +256,6 @@ struct intel_shared_dpll_state { */ struct intel_shared_dpll_funcs { /** - * @prepare: - * - * Optional hook to perform operations prior to enabling the PLL. - * Called from intel_prepare_shared_dpll() function unless the PLL - * is already enabled. - */ - void (*prepare)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - - /** * @enable: * * Hook for enabling the pll, called from intel_enable_shared_dpll() @@ -404,7 +394,6 @@ int intel_dpll_get_freq(struct drm_i915_private *i915, bool intel_dpll_get_hw_state(struct drm_i915_private *i915, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state); -void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c new file mode 100644 index 000000000000..963ca7155b06 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_display_types.h" +#include "intel_dpt.h" +#include "intel_fb.h" +#include "gt/gen8_ppgtt.h" + +struct i915_dpt { + struct i915_address_space vm; + + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void __iomem *iomem; +}; + +#define i915_is_dpt(vm) ((vm)->is_dpt) + +static inline struct i915_dpt * +i915_vm_to_dpt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_dpt, vm)); + GEM_BUG_ON(!i915_is_dpt(vm)); + return container_of(vm, struct i915_dpt, vm); +} + +#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) + +static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) +{ + writeq(pte, addr); +} + +static void dpt_insert_page(struct i915_address_space *vm, + dma_addr_t addr, + u64 offset, + enum i915_cache_level level, + u32 flags) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + gen8_pte_t __iomem *base = dpt->iomem; + + gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE, + vm->pte_encode(addr, 
level, flags)); +} + +static void dpt_insert_entries(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level level, + u32 flags) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + gen8_pte_t __iomem *base = dpt->iomem; + const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags); + struct sgt_iter sgt_iter; + dma_addr_t addr; + int i; + + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. + */ + + i = vma->node.start / I915_GTT_PAGE_SIZE; + for_each_sgt_daddr(addr, sgt_iter, vma->pages) + gen8_set_pte(&base[i++], pte_encode | addr); +} + +static void dpt_clear_range(struct i915_address_space *vm, + u64 start, u64 length) +{ +} + +static void dpt_bind_vma(struct i915_address_space *vm, + struct i915_vm_pt_stash *stash, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + u32 pte_flags; + + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ + pte_flags = 0; + if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj)) + pte_flags |= PTE_READ_ONLY; + if (i915_gem_object_is_lmem(obj)) + pte_flags |= PTE_LM; + + vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; + + /* + * Without aliasing PPGTT there's no difference between + * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally + * upgrade to both bound if we bind either to avoid double-binding. + */ + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); +} + +static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) +{ + vm->clear_range(vm, vma->node.start, vma->size); +} + +static void dpt_cleanup(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_gem_object_put(dpt->obj); +} + +struct i915_vma *intel_dpt_pin(struct i915_address_space *vm) +{ + struct drm_i915_private *i915 = vm->i915; + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + intel_wakeref_t wakeref; + struct i915_vma *vma; + void __iomem *iomem; + struct i915_gem_ww_ctx ww; + int err; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + atomic_inc(&i915->gpu_error.pending_fb_pin); + + for_i915_gem_ww(&ww, err, true) { + err = i915_gem_object_lock(dpt->obj, &ww); + if (err) + continue; + + vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096, + HAS_LMEM(i915) ? 0 : PIN_MAPPABLE); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + continue; + } + + iomem = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + + if (IS_ERR(iomem)) { + err = PTR_ERR(iomem); + continue; + } + + dpt->vma = vma; + dpt->iomem = iomem; + + i915_vma_get(vma); + } + + atomic_dec(&i915->gpu_error.pending_fb_pin); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + return err ? ERR_PTR(err) : vma; +} + +void intel_dpt_unpin(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_vma_unpin_iomap(dpt->vma); + i915_vma_put(dpt->vma); +} + +/** + * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume + * @i915: device instance + * + * Restore the memory mapping during system resume for all framebuffers which + * are mapped to HW via a GGTT->DPT page table. The content of these page + * tables is not stored in the hibernation image during S4 and S3RST->S4 + * transitions, so here we reprogram the PTE entries in those tables.
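
dpt_insert_entries() above is the heart of the DPT write path: encode the cache and flag bits once, then stream one 64-bit PTE per backing page into the CPU-mapped page table. Trimmed to its core:

	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i = vma->node.start / I915_GTT_PAGE_SIZE;

	/* one writeq() per 4 KiB page backing the framebuffer */
	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
		gen8_set_pte(&base[i++], pte_encode | addr);

For scale, with 4 KiB pages a 3840x2160 XRGB8888 framebuffer occupies exactly 8100 pages, so intel_dpt_create() below needs 8100 * 8 bytes of PTEs, which it rounds up to a single page-aligned 64 KiB object.
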
+ * + * This function must be called after the mappings in GGTT have been restored by calling + * i915_ggtt_resume(). + */ +void intel_dpt_resume(struct drm_i915_private *i915) +{ + struct drm_framebuffer *drm_fb; + + if (!HAS_DISPLAY(i915)) + return; + + mutex_lock(&i915->drm.mode_config.fb_lock); + drm_for_each_fb(drm_fb, &i915->drm) { + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); + + if (fb->dpt_vm) + i915_ggtt_resume_vm(fb->dpt_vm); + } + mutex_unlock(&i915->drm.mode_config.fb_lock); +} + +/** + * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend + * @i915: device instance + * + * Suspend the memory mapping during system suspend for all framebuffers which + * are mapped to HW via a GGTT->DPT page table. + * + * This function must be called before the mappings in GGTT are suspended by calling + * i915_ggtt_suspend(). + */ +void intel_dpt_suspend(struct drm_i915_private *i915) +{ + struct drm_framebuffer *drm_fb; + + if (!HAS_DISPLAY(i915)) + return; + + mutex_lock(&i915->drm.mode_config.fb_lock); + + drm_for_each_fb(drm_fb, &i915->drm) { + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); + + if (fb->dpt_vm) + i915_ggtt_suspend_vm(fb->dpt_vm); + } + + mutex_unlock(&i915->drm.mode_config.fb_lock); +} + +struct i915_address_space * +intel_dpt_create(struct intel_framebuffer *fb) +{ + struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base; + struct drm_i915_private *i915 = to_i915(obj->dev); + struct drm_i915_gem_object *dpt_obj; + struct i915_address_space *vm; + struct i915_dpt *dpt; + size_t size; + int ret; + + if (intel_fb_needs_pot_stride_remap(fb)) + size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped); + else + size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE); + + size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE); + + if (HAS_LMEM(i915)) + dpt_obj = i915_gem_object_create_lmem(i915, size, 0); + else + dpt_obj = i915_gem_object_create_stolen(i915, size); + if (IS_ERR(dpt_obj)) + return ERR_CAST(dpt_obj); + + ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE); + if (ret) { + i915_gem_object_put(dpt_obj); + return ERR_PTR(ret); + } + + dpt = kzalloc(sizeof(*dpt), GFP_KERNEL); + if (!dpt) { + i915_gem_object_put(dpt_obj); + return ERR_PTR(-ENOMEM); + } + + vm = &dpt->vm; + + vm->gt = &i915->gt; + vm->i915 = i915; + vm->dma = i915->drm.dev; + vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; + vm->is_dpt = true; + + i915_address_space_init(vm, VM_CLASS_DPT); + + vm->insert_page = dpt_insert_page; + vm->clear_range = dpt_clear_range; + vm->insert_entries = dpt_insert_entries; + vm->cleanup = dpt_cleanup; + + vm->vma_ops.bind_vma = dpt_bind_vma; + vm->vma_ops.unbind_vma = dpt_unbind_vma; + vm->vma_ops.set_pages = ggtt_set_pages; + vm->vma_ops.clear_pages = clear_pages; + + vm->pte_encode = gen8_ggtt_pte_encode; + + dpt->obj = dpt_obj; + + return &dpt->vm; +} + +void intel_dpt_destroy(struct i915_address_space *vm) +{ + struct i915_dpt *dpt = i915_vm_to_dpt(vm); + + i915_vm_close(&dpt->vm); +} diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h new file mode 100644 index 000000000000..e18a9f767b11 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dpt.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DPT_H__ +#define __INTEL_DPT_H__ + +struct drm_i915_private; + +struct i915_address_space; +struct i915_vma; +struct intel_framebuffer; + +void
intel_dpt_destroy(struct i915_address_space *vm); +struct i915_vma *intel_dpt_pin(struct i915_address_space *vm); +void intel_dpt_unpin(struct i915_address_space *vm); +void intel_dpt_suspend(struct drm_i915_private *i915); +void intel_dpt_resume(struct drm_i915_private *i915); +struct i915_address_space * +intel_dpt_create(struct intel_framebuffer *fb); + +#endif /* __INTEL_DPT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c new file mode 100644 index 000000000000..c1439fcb5a95 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_atomic.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_drrs.h" +#include "intel_panel.h" + +/** + * DOC: Display Refresh Rate Switching (DRRS) + * + * Display Refresh Rate Switching (DRRS) is a power conservation feature + * which enables switching between low and high refresh rates, + * dynamically, based on the usage scenario. This feature is applicable + * for internal panels. + * + * Indication that the panel supports DRRS is given by the panel EDID, which + * would list multiple refresh rates for one resolution. + * + * DRRS is of 2 types - static and seamless. + * Static DRRS involves changing the refresh rate (RR) by doing a full modeset + * (may appear as a blink on screen) and is used in the dock-undock scenario. + * Seamless DRRS involves changing the RR without any visual effect to the user + * and can be used during normal system usage. This is done by programming + * certain registers. + * + * Support for static/seamless DRRS may be indicated in the VBT based on + * inputs from the panel spec. + * + * DRRS saves power by switching to a low RR based on usage scenarios. + * + * The implementation is based on frontbuffer tracking. When there is a + * disturbance on the screen triggered by user activity or periodic + * system activity, DRRS is disabled (RR is changed to high RR). When there is + * no movement on screen, after a timeout of 1 second, a switch to low RR is + * made. + * + * For integration with frontbuffer tracking code, intel_drrs_invalidate() + * and intel_drrs_flush() are called. + * + * DRRS can be further extended to support other internal panels and also + * the scenario of video playback wherein RR is set based on the rate + * requested by userspace. + */ + +void +intel_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + int pixel_clock; + + if (pipe_config->vrr.enable) + return; + + /* + * DRRS and PSR can't be enabled together, so giving preference to PSR + * as it allows more power savings by completely shutting down the display, + * so to guarantee this, intel_drrs_compute_config() must be called + * after intel_psr_compute_config().
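+ * + * For example, when a modeset enables PSR on an eDP panel, has_psr is + * already set by the time this function runs, so the check below bails + * out and DRRS stays disabled for that pipe.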
+ */ + if (pipe_config->has_psr) + return; + + if (!intel_connector->panel.downclock_mode || + dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + pipe_config->has_drrs = true; + + pixel_clock = intel_connector->panel.downclock_mode->clock; + if (pipe_config->splitter.enable) + pixel_clock /= pipe_config->splitter.link_count; + + intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock, + pipe_config->port_clock, &pipe_config->dp_m2_n2, + constant_n, pipe_config->fec_enable); + + /* FIXME: abstract this better */ + if (pipe_config->splitter.enable) + pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; +} + +static void intel_drrs_set_state(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state, + enum drrs_refresh_rate_type refresh_type) +{ + struct intel_dp *intel_dp = dev_priv->drrs.dp; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_display_mode *mode; + + if (!intel_dp) { + drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); + return; + } + + if (!crtc) { + drm_dbg_kms(&dev_priv->drm, + "DRRS: intel_crtc not initialized\n"); + return; + } + + if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { + drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); + return; + } + + if (refresh_type == dev_priv->drrs.refresh_rate_type) + return; + + if (!crtc_state->hw.active) { + drm_dbg_kms(&dev_priv->drm, + "eDP encoder disabled. CRTC not Active\n"); + return; + } + + if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { + switch (refresh_type) { + case DRRS_HIGH_RR: + intel_dp_set_m_n(crtc_state, M1_N1); + break; + case DRRS_LOW_RR: + intel_dp_set_m_n(crtc_state, M2_N2); + break; + case DRRS_MAX_RR: + default: + drm_err(&dev_priv->drm, + "Unsupported refreshrate type\n"); + } + } else if (DISPLAY_VER(dev_priv) > 6) { + i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); + u32 val; + + val = intel_de_read(dev_priv, reg); + if (refresh_type == DRRS_LOW_RR) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val |= PIPECONF_EDP_RR_MODE_SWITCH; + } else { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val &= ~PIPECONF_EDP_RR_MODE_SWITCH; + } + intel_de_write(dev_priv, reg, val); + } + + dev_priv->drrs.refresh_rate_type = refresh_type; + + if (refresh_type == DRRS_LOW_RR) + mode = intel_dp->attached_connector->panel.downclock_mode; + else + mode = intel_dp->attached_connector->panel.fixed_mode; + drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", + drm_mode_vrefresh(mode)); +} + +static void +intel_drrs_enable_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + dev_priv->drrs.busy_frontbuffer_bits = 0; + dev_priv->drrs.dp = intel_dp; +} + +/** + * intel_drrs_enable - init drrs struct if supported + * @intel_dp: DP struct + * @crtc_state: A pointer to the active crtc state. 
+ * + * Initializes frontbuffer_bits and drrs.dp + */ +void intel_drrs_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (!crtc_state->has_drrs) + return; + + drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n"); + + mutex_lock(&dev_priv->drrs.mutex); + + if (dev_priv->drrs.dp) { + drm_warn(&dev_priv->drm, "DRRS already enabled\n"); + goto unlock; + } + + intel_drrs_enable_locked(intel_dp); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +static void +intel_drrs_disable_locked(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + intel_drrs_set_state(dev_priv, crtc_state, DRRS_HIGH_RR); + dev_priv->drrs.dp = NULL; +} + +/** + * intel_drrs_disable - Disable DRRS + * @intel_dp: DP struct + * @old_crtc_state: Pointer to old crtc_state. + * + */ +void intel_drrs_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (!old_crtc_state->has_drrs) + return; + + mutex_lock(&dev_priv->drrs.mutex); + if (!dev_priv->drrs.dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + intel_drrs_disable_locked(intel_dp, old_crtc_state); + mutex_unlock(&dev_priv->drrs.mutex); + + cancel_delayed_work_sync(&dev_priv->drrs.work); +} + +/** + * intel_drrs_update - Update DRRS state + * @intel_dp: Intel DP + * @crtc_state: new CRTC state + * + * This function will update DRRS states, disabling or enabling DRRS when + * executing fastsets. For full modeset, intel_drrs_disable() and + * intel_drrs_enable() should be called instead. + */ +void +intel_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + return; + + mutex_lock(&dev_priv->drrs.mutex); + + /* New state matches current one? */ + if (crtc_state->has_drrs == !!dev_priv->drrs.dp) + goto unlock; + + if (crtc_state->has_drrs) + intel_drrs_enable_locked(intel_dp); + else + intel_drrs_disable_locked(intel_dp, crtc_state); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +static void intel_drrs_downclock_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), drrs.work.work); + struct intel_dp *intel_dp; + struct drm_crtc *crtc; + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + + if (!intel_dp) + goto unlock; + + /* + * The delayed work can race with an invalidate hence we need to + * recheck. 
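+ * A fresh invalidate may set busy_frontbuffer_bits again after this work + * was scheduled; the bits are rechecked below under drrs.mutex, so a busy + * pipe is never downclocked.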
+ */ + + if (dev_priv->drrs.busy_frontbuffer_bits) + goto unlock; + + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config, DRRS_LOW_RR); + +unlock: + mutex_unlock(&dev_priv->drrs.mutex); +} + +static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + bool invalidate) +{ + struct intel_dp *intel_dp; + struct drm_crtc *crtc; + enum pipe pipe; + + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) + return; + + cancel_delayed_work(&dev_priv->drrs.work); + + mutex_lock(&dev_priv->drrs.mutex); + + intel_dp = dev_priv->drrs.dp; + if (!intel_dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + crtc = dp_to_dig_port(intel_dp)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + if (invalidate) + dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; + else + dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; + + /* flush/invalidate means busy screen hence upclock */ + if (frontbuffer_bits) + intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config, + DRRS_HIGH_RR); + + /* + * flush also means no more activity hence schedule downclock, if all + * other fbs are quiescent too + */ + if (!invalidate && !dev_priv->drrs.busy_frontbuffer_bits) + schedule_delayed_work(&dev_priv->drrs.work, + msecs_to_jiffies(1000)); + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_drrs_invalidate - Disable Idleness DRRS + * @dev_priv: i915 device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes starts. + * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR). + * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ +void intel_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true); +} + +/** + * intel_drrs_flush - Restart Idleness DRRS + * @dev_priv: i915 device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes has + * completed or a flip on a crtc has completed. So DRRS should be upclocked + * (LOW_RR -> HIGH_RR). Idleness detection should also be started again, + * if no other planes are dirty. + * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ +void intel_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false); +} + +void intel_drrs_page_flip(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + unsigned int frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe); + + intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false); +} + +/** + * intel_drrs_init - Init basic DRRS work and mutex. + * @connector: eDP connector + * @fixed_mode: preferred mode of panel + * + * This function is called only once at driver load to initialize basic + * DRRS stuff. + * + * Returns: + * Downclock mode if the panel supports it, else NULL. + * DRRS support is determined by the presence of a downclock mode (apart + * from the VBT setting).
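+ * + * As an illustration (hypothetical panel, not taken from this commit): an + * eDP panel whose EDID lists both 60 Hz and 40 Hz modes at the native + * resolution would yield the 40 Hz variant as the downclock mode here.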
+ */ +struct drm_display_mode * +intel_drrs_init(struct intel_connector *connector, + struct drm_display_mode *fixed_mode) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_display_mode *downclock_mode = NULL; + + INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work); + mutex_init(&dev_priv->drrs.mutex); + + if (DISPLAY_VER(dev_priv) <= 6) { + drm_dbg_kms(&dev_priv->drm, + "DRRS supported for Gen7 and above\n"); + return NULL; + } + + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { + drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); + return NULL; + } + + downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); + if (!downclock_mode) { + drm_dbg_kms(&dev_priv->drm, + "Downclock mode is not found. DRRS not supported\n"); + return NULL; + } + + dev_priv->drrs.type = dev_priv->vbt.drrs_type; + + dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; + drm_dbg_kms(&dev_priv->drm, + "seamless DRRS supported for eDP panel.\n"); + return downclock_mode; +} diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h new file mode 100644 index 000000000000..9ec9c447211a --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_drrs.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DRRS_H__ +#define __INTEL_DRRS_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc; +struct intel_crtc_state; +struct intel_connector; +struct intel_dp; + +void intel_drrs_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_drrs_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_drrs_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_drrs_page_flip(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n); +struct drm_display_mode *intel_drrs_init(struct intel_connector *connector, + struct drm_display_mode *fixed_mode); + +#endif /* __INTEL_DRRS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 62a8a69f9f5d..83a69a4a4fea 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -100,7 +100,7 @@ void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state, u32 reg_val; if (!dsb) { - intel_de_write(dev_priv, reg, val); + intel_de_write_fw(dev_priv, reg, val); return; } buf = dsb->cmd_buf; @@ -177,7 +177,7 @@ void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state, dsb = crtc_state->dsb; if (!dsb) { - intel_de_write(dev_priv, reg, val); + intel_de_write_fw(dev_priv, reg, val); return; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index f453ceb8d149..a50422e03a7e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -4,7 +4,10 @@ */ #include <drm/drm_mipi_dsi.h> + +#include "i915_drv.h" #include "intel_dsi.h" +#include "intel_panel.h" int intel_dsi_bitrate(const struct intel_dsi *intel_dsi) { @@ -60,20 +63,19 @@ enum 
drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct intel_connector *intel_connector = to_intel_connector(connector); const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + enum drm_mode_status status; drm_dbg_kms(&dev_priv->drm, "\n"); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - if (fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) - return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) - return MODE_PANEL; - if (fixed_mode->clock > max_dotclk) - return MODE_CLOCK_HIGH; - } + status = intel_panel_mode_valid(intel_connector, mode); + if (status != MODE_OK) + return status; + + if (fixed_mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; return intel_mode_valid_max_plane_size(dev_priv, mode, false); } diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index 50d6da0b2419..a3a906cb097e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -166,54 +166,15 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) return enc_to_intel_dsi(encoder)->ports; } -/* icl_dsi.c */ -void icl_dsi_init(struct drm_i915_private *dev_priv); -void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); - -/* intel_dsi.c */ int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); enum drm_panel_orientation intel_dsi_get_panel_orientation(struct intel_connector *connector); - -/* vlv_dsi.c */ -void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); -enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); int intel_dsi_get_modes(struct drm_connector *connector); enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, const struct mipi_dsi_host_ops *funcs, enum port port); -void vlv_dsi_init(struct drm_i915_private *dev_priv); - -/* vlv_dsi_pll.c */ -int vlv_dsi_pll_compute(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void vlv_dsi_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void vlv_dsi_pll_disable(struct intel_encoder *encoder); -u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); - -bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); -int bxt_dsi_pll_compute(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void bxt_dsi_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void bxt_dsi_pll_disable(struct intel_encoder *encoder); -u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); - -/* intel_dsi_vbt.c */ -bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); -void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); -void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); -void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, - enum mipi_seq seq_id); -void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); -void intel_dsi_log_params(struct intel_dsi *intel_dsi); #endif /* _INTEL_DSI_H */ diff --git 
a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index 584c14c4cbd0..7d234429e71e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -47,33 +47,46 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused { struct intel_encoder *encoder = intel_attached_encoder(connector); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + struct intel_panel *panel = &connector->panel; struct mipi_dsi_device *dsi_device; - u8 data = 0; + u8 data[2] = {}; enum port port; + size_t len = panel->backlight.max > U8_MAX ? 2 : 1; - /* FIXME: Need to take care of 16 bit brightness level */ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, - &data, sizeof(data)); + &data, len); break; } - return data; + return (data[1] << 8) | data[0]; } static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); + struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; - u8 data = level; + u8 data[2] = {}; enum port port; + size_t len = panel->backlight.max > U8_MAX ? 2 : 1; + unsigned long mode_flags; + + if (len == 1) { + data[0] = level; + } else { + data[0] = level >> 8; + data[1] = level; + } - /* FIXME: Need to take care of 16 bit brightness level */ for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; + mode_flags = dsi_device->mode_flags; + dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, - &data, sizeof(data)); + &data, len); + dsi_device->mode_flags = mode_flags; } } @@ -147,10 +160,16 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, static int dcs_setup_backlight(struct intel_connector *connector, enum pipe unused) { + struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_panel *panel = &connector->panel; - panel->backlight.max = PANEL_PWM_MAX_VALUE; - panel->backlight.level = PANEL_PWM_MAX_VALUE; + if (dev_priv->vbt.backlight.brightness_precision_bits > 8) + panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1; + else + panel->backlight.max = PANEL_PWM_MAX_VALUE; + + panel->backlight.level = panel->backlight.max; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index c2a2cd1f84dc..0da91849efde 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -31,7 +31,6 @@ #include <linux/pinctrl/machine.h> #include <linux/slab.h> -#include <asm/intel-mid.h> #include <asm/unaligned.h> #include <drm/drm_crtc.h> @@ -42,7 +41,9 @@ #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dsi.h" -#include "intel_sideband.h" +#include "intel_dsi_vbt.h" +#include "vlv_dsi.h" +#include "vlv_sideband.h" #define MIPI_TRANSFER_MODE_SHIFT 0 #define MIPI_VIRTUAL_CHANNEL_SHIFT 1 diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h new file mode 100644 index 000000000000..dc642c1fe7ef --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h @@ -0,0 
+1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DSI_VBT_H__ +#define __INTEL_DSI_VBT_H__ + +#include <linux/types.h> + +enum mipi_seq; +struct intel_dsi; + +bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); +void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); +void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); +void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id); +void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); +void intel_dsi_log_params(struct intel_dsi *intel_dsi); + +#endif /* __INTEL_DSI_VBT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 77419f8c05e9..2eeb209afc64 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -223,9 +223,10 @@ static enum drm_mode_status intel_dvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector)); + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector); const struct drm_display_mode *fixed_mode = - to_intel_connector(connector)->panel.fixed_mode; + intel_connector->panel.fixed_mode; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; int target_clock = mode->clock; @@ -235,10 +236,11 @@ intel_dvo_mode_valid(struct drm_connector *connector, /* XXX: Validate clock range */ if (fixed_mode) { - if (mode->hdisplay > fixed_mode->hdisplay) - return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) - return MODE_PANEL; + enum drm_mode_status status; + + status = intel_panel_mode_valid(intel_connector, mode); + if (status != MODE_OK) + return status; target_clock = fixed_mode->clock; } @@ -254,6 +256,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state) { struct intel_dvo *intel_dvo = enc_to_dvo(encoder); + struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *fixed_mode = intel_dvo->attached_connector->panel.fixed_mode; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -264,8 +267,13 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ - if (fixed_mode) - intel_fixed_panel_mode(fixed_mode, adjusted_mode); + if (fixed_mode) { + int ret; + + ret = intel_panel_compute_config(connector, adjusted_mode); + if (ret) + return ret; + } if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index c60a81a81c09..23cfe2e5ce2a 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -4,33 +4,475 @@ */ #include <drm/drm_framebuffer.h> +#include <drm/drm_modeset_helper.h> +#include "i915_drv.h" #include "intel_display.h" #include "intel_display_types.h" +#include "intel_dpt.h" #include "intel_fb.h" #define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a)) -bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) +/* + * From the Sky Lake PRM: + * "The Color Control Surface (CCS) contains the compression status of + * the cache-line pairs. 
The compression state of the cache-line pair + * is specified by 2 bits in the CCS. Each CCS cache-line represents + * an area on the main surface of 16x16 sets of 128 byte Y-tiled + * cache-line-pairs. CCS is always Y tiled." + * + * Since cache line pairs refer to horizontally adjacent cache lines, + * each cache line in the CCS corresponds to an area of 32x16 cache + * lines on the main surface. Since each pixel is 4 bytes, this gives + * us a ratio of one byte in the CCS for each 8x16 pixels in the + * main surface. + */ +static const struct drm_format_info skl_ccs_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, +}; + +/* + * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the + * main surface, and each 64B CCS cache line represents an area of 4x1 Y-tiles + * in the main surface. With 4 byte pixels and each Y-tile having dimensions of + * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels in + * the main surface. + */ +static const struct drm_format_info gen12_ccs_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_YUYV, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVYU, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_UYVY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VYUY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV12, .num_planes = 4, + .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P010, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P012, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format =
DRM_FORMAT_P016, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, +}; + +/* + * Same as gen12_ccs_formats[] above, but with additional surface used + * to pass Clear Color information in plane 2 with 64 bits of data. + */ +static const struct drm_format_info gen12_ccs_cc_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, +}; + +struct intel_modifier_desc { + u64 modifier; + struct { + u8 from; + u8 until; + } display_ver; +#define DISPLAY_VER_ALL { 0, -1 } + + const struct drm_format_info *formats; + int format_count; +#define FORMAT_OVERRIDE(format_list) \ + .formats = format_list, \ + .format_count = ARRAY_SIZE(format_list) + + u8 plane_caps; + + struct { + u8 cc_planes:3; + u8 packed_aux_planes:4; + u8 planar_aux_planes:4; + } ccs; +}; + +#define INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \ + INTEL_PLANE_CAP_CCS_RC_CC | \ + INTEL_PLANE_CAP_CCS_MC) +#define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \ + INTEL_PLANE_CAP_TILING_Y | \ + INTEL_PLANE_CAP_TILING_Yf) +#define INTEL_PLANE_CAP_TILING_NONE 0 + +static const struct intel_modifier_desc intel_modifiers[] = { + { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC, + + .ccs.packed_aux_planes = BIT(1), + .ccs.planar_aux_planes = BIT(2) | BIT(3), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC, + + .ccs.cc_planes = BIT(2), + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_cc_formats), + }, { + .modifier = I915_FORMAT_MOD_Yf_TILED_CCS, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(skl_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_CCS, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(skl_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Yf_TILED, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Yf, + }, { + .modifier = I915_FORMAT_MOD_Y_TILED, + .display_ver = { 9, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y, + }, { + .modifier = I915_FORMAT_MOD_X_TILED, + .display_ver = DISPLAY_VER_ALL, + .plane_caps = INTEL_PLANE_CAP_TILING_X, + }, { + .modifier = DRM_FORMAT_MOD_LINEAR, + .display_ver = DISPLAY_VER_ALL, + }, +}; + 
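+/* + * Worked example for the gen12 ratio quoted above (illustrative numbers, + * not part of this patch): at one CCS byte per 2x32 main surface pixels, a + * 4096x4096 XRGB8888 surface (64 MiB) needs (4096 / 2) * (4096 / 32) bytes + * = 256 KiB of CCS data, i.e. 1/256th of the main surface size. + */ +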
+static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) + if (intel_modifiers[i].modifier == modifier) + return &intel_modifiers[i]; + + return NULL; +} + +static const struct intel_modifier_desc *lookup_modifier(u64 modifier) +{ + const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier); + + if (WARN_ON(!md)) + return &intel_modifiers[0]; + + return md; +} + +static const struct drm_format_info * +lookup_format_info(const struct drm_format_info formats[], + int num_formats, u32 format) +{ + int i; + + for (i = 0; i < num_formats; i++) { + if (formats[i].format == format) + return &formats[i]; + } + + return NULL; +} + +/** + * intel_fb_get_format_info: Get a modifier specific format information + * @cmd: FB add command structure + * + * Returns: + * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0], + * or %NULL if the modifier doesn't override the format. + */ +const struct drm_format_info * +intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +{ + const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]); + + if (!md || !md->formats) + return NULL; + + return lookup_format_info(md->formats, md->format_count, cmd->pixel_format); +} + +static bool plane_caps_contain_any(u8 caps, u8 mask) +{ + return caps & mask; +} + +static bool plane_caps_contain_all(u8 caps, u8 mask) +{ + return (caps & mask) == mask; +} + +/** + * intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a render, render with color clear or + * media compression modifier. + */ +bool intel_fb_is_ccs_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_CCS_MASK); +} + +/** + * intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a render with color clear modifier. + */ +bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_CCS_RC_CC); +} + +/** + * intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a media compression modifier. + */ +bool intel_fb_is_mc_ccs_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_CCS_MC); +} + +static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, + u8 display_ver_from, u8 display_ver_until) +{ + return md->display_ver.from <= display_ver_until && + display_ver_from <= md->display_ver.until; +} + +static bool plane_has_modifier(struct drm_i915_private *i915, + u8 plane_caps, + const struct intel_modifier_desc *md) +{ + if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until)) + return false; + + if (!plane_caps_contain_all(plane_caps, md->plane_caps)) + return false; + + return true; +} + +/** + * intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities + * @i915: i915 device instance + * @plane_caps: capabilities for the plane the modifiers are queried for + * + * Returns: + * Returns the list of modifiers allowed by the @i915 platform and @plane_caps. + * The caller must free the returned buffer. 
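+ * + * A typical caller (sketch, assuming the usual plane init flow) passes the + * returned array to drm_universal_plane_init(), which copies it, and then + * kfree()s the array.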
+ */ +u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, + u8 plane_caps) +{ + u64 *list, *p; + int count = 1; /* +1 for invalid modifier terminator */ + int i; + + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) + count++; + } + + list = kmalloc_array(count, sizeof(*list), GFP_KERNEL); + if (drm_WARN_ON(&i915->drm, !list)) + return NULL; + + p = list; + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) + *p++ = intel_modifiers[i].modifier; + } + *p++ = DRM_FORMAT_MOD_INVALID; + + return list; +} + +/** + * intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane + * @plane: Plane to check the modifier support for + * @modifier: The modifier to check the support for + * + * Returns: + * %true if the @modifier is supported on @plane. + */ +bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier) { - if (!is_ccs_modifier(fb->modifier)) + int i; + + for (i = 0; i < plane->base.modifier_count; i++) + if (plane->base.modifiers[i] == modifier) + return true; + + return false; +} + +static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md, + const struct drm_format_info *info) +{ + int yuv_planes; + + if (!info->is_yuv) return false; - return plane >= fb->format->num_planes / 2; + if (plane_caps_contain_any(md->plane_caps, INTEL_PLANE_CAP_CCS_MASK)) + yuv_planes = 4; + else + yuv_planes = 2; + + return info->num_planes == yuv_planes; } -bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) +/** + * intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar + * @info: format to check + * @modifier: modifier used with the format + * + * Returns: + * %true if @info / @modifier is YUV semiplanar. + */ +bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + u64 modifier) { - return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); + return format_is_yuv_semiplanar(lookup_modifier(modifier), info); } -bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane) +static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md, + const struct drm_format_info *format) { - return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC && - plane == 2; + if (format_is_yuv_semiplanar(md, format)) + return md->ccs.planar_aux_planes; + else + return md->ccs.packed_aux_planes; } -bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) +/** + * intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane + * @fb: Framebuffer + * @color_plane: color plane index to check + * + * Returns: + * Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane. + */ +bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) +{ + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); +} + +/** + * intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane + * @fb: Framebuffer + * @color_plane: color plane index to check + * + * Returns: + * Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane. 
+ */ +static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) +{ + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + return check_modifier_display_ver_range(md, 12, 13) && + ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); +} + +/** + * intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer + * @fb: Framebuffer + * + * Returns: + * Returns the index of the color clear plane for @fb, or -1 if @fb is not a + * framebuffer using a render compression/color clear modifier. + */ +int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb) +{ + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + if (!md->ccs.cc_planes) + return -1; + + drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1); + + return ilog2((int)md->ccs.cc_planes); +} + +static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane) +{ + return intel_fb_rc_ccs_cc_plane(fb) == color_plane; +} + +static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) { return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && color_plane == 1; @@ -39,12 +481,13 @@ bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) { return fb->modifier == DRM_FORMAT_MOD_LINEAR || - is_gen12_ccs_plane(fb, color_plane); + intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) || + is_gen12_ccs_cc_plane(fb, color_plane); } int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) { - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || (main_plane && main_plane >= fb->format->num_planes / 2)); return fb->format->num_planes / 2 + main_plane; @@ -52,7 +495,7 @@ int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) { - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || ccs_plane < fb->format->num_planes / 2); if (is_gen12_ccs_cc_plane(fb, ccs_plane)) @@ -61,11 +504,20 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) return ccs_plane - fb->format->num_planes / 2; } +static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane) +{ + int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane); + unsigned int main_stride = fb->base.pitches[main_plane]; + unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane); + + return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64; +} + int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) { struct drm_i915_private *i915 = to_i915(fb->dev); - if (is_ccs_modifier(fb->modifier)) + if (intel_fb_is_ccs_modifier(fb->modifier)) return main_to_ccs_plane(fb, main_plane); else if (DISPLAY_VER(i915) < 11 && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) @@ -79,16 +531,71 @@ unsigned int intel_tile_size(const struct drm_i915_private *i915) return DISPLAY_VER(i915) == 2 ? 
2048 : 4096; } -unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane) +unsigned int +intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) { - if (is_gen12_ccs_plane(fb, color_plane)) - return 1; + struct drm_i915_private *dev_priv = to_i915(fb->dev); + unsigned int cpp = fb->format->cpp[color_plane]; + + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + return intel_tile_size(dev_priv); + case I915_FORMAT_MOD_X_TILED: + if (DISPLAY_VER(dev_priv) == 2) + return 128; + else + return 512; + case I915_FORMAT_MOD_Y_TILED_CCS: + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) + return 128; + fallthrough; + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + if (intel_fb_is_ccs_aux_plane(fb, color_plane) || + is_gen12_ccs_cc_plane(fb, color_plane)) + return 64; + fallthrough; + case I915_FORMAT_MOD_Y_TILED: + if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv)) + return 128; + else + return 512; + case I915_FORMAT_MOD_Yf_TILED_CCS: + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) + return 128; + fallthrough; + case I915_FORMAT_MOD_Yf_TILED: + switch (cpp) { + case 1: + return 64; + case 2: + case 4: + return 128; + case 8: + case 16: + return 256; + default: + MISSING_CASE(cpp); + return cpp; + } + break; + default: + MISSING_CASE(fb->modifier); + return cpp; + } +} +unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane) +{ return intel_tile_size(to_i915(fb->dev)) / intel_tile_width_bytes(fb, color_plane); } -/* Return the tile dimensions in pixel units */ +/* + * Return the tile dimensions in pixel units, based on the (2 or 4 kbyte) GTT + * page tile size. + */ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, unsigned int *tile_width, unsigned int *tile_height) @@ -100,6 +607,21 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane, *tile_height = intel_tile_height(fb, color_plane); } +/* + * Return the tile dimensions in pixel units, based on the tile block size. + * The block covers the full GTT page sized tile on all tiled surfaces and + * it's a 64 byte portion of the tile on TGL+ CCS surfaces. 
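+ * + * E.g. on a gen12 CCS AUX plane the tile row is 64 bytes wide with one byte + * per element, so intel_tile_dims() sees a 64x64 element tile and the block + * height is then clamped to a single row here.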
+ */ +static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_plane, + unsigned int *tile_width, + unsigned int *tile_height) +{ + intel_tile_dims(fb, color_plane, tile_width, tile_height); + + if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) + *tile_height = 1; +} + unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane) { unsigned int tile_width, tile_height; @@ -109,6 +631,44 @@ unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_pla return fb->pitches[color_plane] * tile_height; } +unsigned int +intel_fb_align_height(const struct drm_framebuffer *fb, + int color_plane, unsigned int height) +{ + unsigned int tile_height = intel_tile_height(fb, color_plane); + + return ALIGN(height, tile_height); +} + +static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) +{ + u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps & + INTEL_PLANE_CAP_TILING_MASK; + + switch (tiling_caps) { + case INTEL_PLANE_CAP_TILING_Y: + return I915_TILING_Y; + case INTEL_PLANE_CAP_TILING_X: + return I915_TILING_X; + case INTEL_PLANE_CAP_TILING_Yf: + case INTEL_PLANE_CAP_TILING_NONE: + return I915_TILING_NONE; + default: + MISSING_CASE(tiling_caps); + return I915_TILING_NONE; + } +} + +static bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) +{ + return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR; +} + +bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) +{ + return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); +} + unsigned int intel_cursor_alignment(const struct drm_i915_private *i915) { if (IS_I830(i915)) @@ -121,6 +681,70 @@ unsigned int intel_cursor_alignment(const struct drm_i915_private *i915) return 4 * 1024; } +static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) +{ + if (DISPLAY_VER(dev_priv) >= 9) + return 256 * 1024; + else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + return 128 * 1024; + else if (DISPLAY_VER(dev_priv) >= 4) + return 4 * 1024; + else + return 0; +} + +unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, + int color_plane) +{ + struct drm_i915_private *dev_priv = to_i915(fb->dev); + + if (intel_fb_uses_dpt(fb)) + return 512 * 4096; + + /* AUX_DIST needs only 4K alignment */ + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) + return 4096; + + if (is_semiplanar_uv_plane(fb, color_plane)) { + /* + * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes + * alignment for linear UV planes on all platforms. 
+ */ + if (DISPLAY_VER(dev_priv) >= 12) { + if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + return intel_linear_alignment(dev_priv); + + return intel_tile_row_size(fb, color_plane); + } + + return 4096; + } + + drm_WARN_ON(&dev_priv->drm, color_plane != 0); + + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + return intel_linear_alignment(dev_priv); + case I915_FORMAT_MOD_X_TILED: + if (HAS_ASYNC_FLIPS(dev_priv)) + return 256 * 1024; + return 0; + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: + return 16 * 1024; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + return 1 * 1024 * 1024; + default: + MISSING_CASE(fb->modifier); + return 0; + } +} + void intel_fb_plane_get_subsampling(int *hsub, int *vsub, const struct drm_framebuffer *fb, int color_plane) @@ -138,7 +762,7 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub, * TODO: Deduct the subsampling from the char block for all CCS * formats and planes. */ - if (!is_gen12_ccs_plane(fb, color_plane)) { + if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) { *hsub = fb->format->hsub; *vsub = fb->format->vsub; @@ -165,15 +789,18 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub, static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h) { - int main_plane = is_ccs_plane(&fb->base, color_plane) ? + int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ? skl_ccs_to_main_plane(&fb->base, color_plane) : 0; + unsigned int main_width = fb->base.width; + unsigned int main_height = fb->base.height; int main_hsub, main_vsub; int hsub, vsub; intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane); - *w = fb->base.width / main_hsub / hsub; - *h = fb->base.height / main_vsub / vsub; + + *w = DIV_ROUND_UP(main_width, main_hsub * hsub); + *h = DIV_ROUND_UP(main_height, main_vsub * vsub); } static u32 intel_adjust_tile_offset(int *x, int *y, @@ -203,6 +830,20 @@ static u32 intel_adjust_tile_offset(int *x, int *y, return new_offset; } +static u32 intel_adjust_linear_offset(int *x, int *y, + unsigned int cpp, + unsigned int pitch, + u32 old_offset, + u32 new_offset) +{ + old_offset += *y * pitch + *x * cpp; + + *y = (old_offset - new_offset) / pitch; + *x = ((old_offset - new_offset) - *y * pitch) / cpp; + + return new_offset; +} + static u32 intel_adjust_aligned_offset(int *x, int *y, const struct drm_framebuffer *fb, int color_plane, @@ -233,10 +874,8 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, tile_size, pitch_tiles, old_offset, new_offset); } else { - old_offset += *y * pitch + *x * cpp; - - *y = (old_offset - new_offset) / pitch; - *x = ((old_offset - new_offset) - *y * pitch) / cpp; + intel_adjust_linear_offset(x, y, cpp, pitch, + old_offset, new_offset); } return new_offset; @@ -253,7 +892,7 @@ u32 intel_plane_adjust_aligned_offset(int *x, int *y, { return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, state->hw.rotation, - state->view.color_plane[color_plane].stride, + state->view.color_plane[color_plane].mapping_stride, old_offset, new_offset); } @@ -334,7 +973,7 @@ u32 intel_plane_compute_aligned_offset(int *x, int *y, struct drm_i915_private *i915 = to_i915(intel_plane->base.dev); const struct drm_framebuffer *fb = state->hw.fb; unsigned int rotation = state->hw.rotation; - 
int pitch = state->view.color_plane[color_plane].stride; + int pitch = state->view.color_plane[color_plane].mapping_stride; u32 alignment; if (intel_plane->id == PLANE_CURSOR) @@ -355,17 +994,9 @@ static int intel_fb_offset_to_xy(int *x, int *y, unsigned int height; u32 alignment; - /* - * All DPT color planes must be 512*4k aligned (the amount mapped by a - * single DPT page). For ADL_P CCS FBs this only works by requiring - * the allocated offsets to be 2MB aligned. Once supoort to remap - * such FBs is added we can remove this requirement, as then all the - * planes can be remapped to an aligned offset. - */ - if (IS_ALDERLAKE_P(i915) && is_ccs_modifier(fb->modifier)) - alignment = 512 * 4096; - else if (DISPLAY_VER(i915) >= 12 && - is_semiplanar_uv_plane(fb, color_plane)) + if (DISPLAY_VER(i915) >= 12 && + !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) && + is_semiplanar_uv_plane(fb, color_plane)) alignment = intel_tile_row_size(fb, color_plane); else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) alignment = intel_tile_size(i915); @@ -413,10 +1044,15 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane int ccs_x, ccs_y; int main_x, main_y; - if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane)) + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) return 0; - intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height); + /* + * While all the tile dimensions are based on a 2k or 4k GTT page size + * here the main and CCS coordinates must match only within a (64 byte + * on TGL+) block inside the tile. + */ + intel_tile_block_dims(fb, ccs_plane, &tile_width, &tile_height); intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane); tile_width *= hsub; @@ -471,7 +1107,7 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state) * The new CCS hash mode isn't compatible with remapping as * the virtual address of the pages affects the compressed data. */ - if (is_ccs_modifier(fb->modifier)) + if (intel_fb_is_ccs_modifier(fb->modifier)) return false; /* Linear needs a page aligned stride for remapping */ @@ -491,18 +1127,17 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb) { struct drm_i915_private *i915 = to_i915(fb->base.dev); - return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR && - !is_ccs_modifier(fb->base.modifier); + return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR; } static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) { if (drm_rotation_90_or_270(rotation)) - return fb->rotated_view.color_plane[color_plane].stride; + return fb->rotated_view.color_plane[color_plane].mapping_stride; else if (intel_fb_needs_pot_stride_remap(fb)) - return fb->remapped_view.color_plane[color_plane].stride; + return fb->remapped_view.color_plane[color_plane].mapping_stride; else - return fb->normal_view.color_plane[color_plane].stride; + return fb->normal_view.color_plane[color_plane].mapping_stride; } static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) @@ -612,14 +1247,30 @@ static unsigned int plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane, unsigned int pitch_tiles) { - if (intel_fb_needs_pot_stride_remap(fb)) + if (intel_fb_needs_pot_stride_remap(fb)) { /* * ADL_P, the only platform needing a POT stride has a minimum - * of 8 stride tiles. + * of 8 main surface tiles. 
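+ * E.g. a 9 tile wide source stride is padded to a 16 tile remapped stride, + * while anything at or below 8 tiles uses 8.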
*/ return roundup_pow_of_two(max(pitch_tiles, 8u)); - else + } else { return pitch_tiles; + } +} + +static unsigned int +plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane, + unsigned int tile_width, + unsigned int src_stride_tiles, unsigned int dst_stride_tiles) +{ + unsigned int stride_tiles; + + if (IS_ALDERLAKE_P(to_i915(fb->base.dev))) + stride_tiles = src_stride_tiles; + else + stride_tiles = dst_stride_tiles; + + return stride_tiles * tile_width * fb->base.format->cpp[color_plane]; } static unsigned int @@ -638,11 +1289,31 @@ plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane, return DIV_ROUND_UP(y + dims->height, dims->tile_height); } +static unsigned int +plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane, + const struct fb_plane_view_dims *dims, + int x, int y) +{ + struct drm_i915_private *i915 = to_i915(fb->base.dev); + unsigned int size; + + size = (y + dims->height) * fb->base.pitches[color_plane] + + x * fb->base.format->cpp[color_plane]; + + return DIV_ROUND_UP(size, intel_tile_size(i915)); +} + #define assign_chk_ovf(i915, var, val) ({ \ drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \ (var) = (val); \ }) +#define assign_bfld_chk_ovf(i915, var, val) ({ \ + (var) = (val); \ + drm_WARN_ON(&(i915)->drm, (var) != (val)); \ + (var); \ +}) + static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, u32 obj_offset, u32 gtt_offset, int x, int y, @@ -655,14 +1326,28 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p unsigned int tile_height = dims->tile_height; unsigned int tile_size = intel_tile_size(i915); struct drm_rect r; - u32 size; + u32 size = 0; + + assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset); - assign_chk_ovf(i915, remap_info->offset, obj_offset); - assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims)); - assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x)); - assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y)); + if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) { + remap_info->linear = 1; + + assign_chk_ovf(i915, remap_info->size, + plane_view_linear_tiles(fb, color_plane, dims, x, y)); + } else { + remap_info->linear = 0; + + assign_chk_ovf(i915, remap_info->src_stride, + plane_view_src_stride_tiles(fb, color_plane, dims)); + assign_chk_ovf(i915, remap_info->width, + plane_view_width_tiles(fb, color_plane, dims, x)); + assign_chk_ovf(i915, remap_info->height, + plane_view_height_tiles(fb, color_plane, dims, y)); + } if (view->gtt.type == I915_GGTT_VIEW_ROTATED) { + drm_WARN_ON(&i915->drm, remap_info->linear); check_array_bounds(i915, view->gtt.rotated.plane, color_plane); assign_chk_ovf(i915, remap_info->dst_stride, @@ -678,9 +1363,10 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p color_plane_info->x = r.x1; color_plane_info->y = r.y1; - color_plane_info->stride = remap_info->dst_stride * tile_height; + color_plane_info->mapping_stride = remap_info->dst_stride * tile_height; + color_plane_info->scanout_stride = color_plane_info->mapping_stride; - size = remap_info->dst_stride * remap_info->width; + size += remap_info->dst_stride * remap_info->width; /* rotate the tile dimensions to match the GTT view */ swap(tile_width, tile_height); @@ -689,16 +1375,37 @@ static u32 calc_plane_remap_info(const struct 
intel_framebuffer *fb, int color_p check_array_bounds(i915, view->gtt.remapped.plane, color_plane); - assign_chk_ovf(i915, remap_info->dst_stride, - plane_view_dst_stride_tiles(fb, color_plane, remap_info->width)); + if (view->gtt.remapped.plane_alignment) { + unsigned int aligned_offset = ALIGN(gtt_offset, + view->gtt.remapped.plane_alignment); + + size += aligned_offset - gtt_offset; + gtt_offset = aligned_offset; + } color_plane_info->x = x; color_plane_info->y = y; - color_plane_info->stride = remap_info->dst_stride * tile_width * - fb->base.format->cpp[color_plane]; + if (remap_info->linear) { + color_plane_info->mapping_stride = fb->base.pitches[color_plane]; + color_plane_info->scanout_stride = color_plane_info->mapping_stride; - size = remap_info->dst_stride * remap_info->height; + size += remap_info->size; + } else { + unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane, + remap_info->width); + + assign_chk_ovf(i915, remap_info->dst_stride, dst_stride); + color_plane_info->mapping_stride = dst_stride * + tile_width * + fb->base.format->cpp[color_plane]; + color_plane_info->scanout_stride = + plane_view_scanout_stride(fb, color_plane, tile_width, + remap_info->src_stride, + dst_stride); + + size += dst_stride * remap_info->height; + } } /* @@ -706,10 +1413,16 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p * the x/y offsets. x,y will hold the first pixel of the framebuffer * plane from the start of the remapped/rotated gtt mapping. */ - intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, - tile_width, tile_height, - tile_size, remap_info->dst_stride, - gtt_offset * tile_size, 0); + if (remap_info->linear) + intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y, + fb->base.format->cpp[color_plane], + color_plane_info->mapping_stride, + gtt_offset * tile_size, 0); + else + intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, + tile_width, tile_height, + tile_size, remap_info->dst_stride, + gtt_offset * tile_size, 0); return size; } @@ -722,15 +1435,10 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x, int y) { - struct drm_i915_private *i915 = to_i915(fb->base.dev); unsigned int tiles; if (is_surface_linear(&fb->base, color_plane)) { - unsigned int size; - - size = (y + dims->height) * fb->base.pitches[color_plane] + - x * fb->base.format->cpp[color_plane]; - tiles = DIV_ROUND_UP(size, intel_tile_size(i915)); + tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y); } else { tiles = plane_view_src_stride_tiles(fb, color_plane, dims) * plane_view_height_tiles(fb, color_plane, dims, y); @@ -745,10 +1453,14 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane, return tiles; } -static void intel_fb_view_init(struct intel_fb_view *view, enum i915_ggtt_view_type view_type) +static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view, + enum i915_ggtt_view_type view_type) { memset(view, 0, sizeof(*view)); view->gtt.type = view_type; + + if (view_type == I915_GGTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915)) + view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE; } bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb) @@ -769,16 +1481,16 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * int i, num_planes = fb->base.format->num_planes; unsigned int tile_size = intel_tile_size(i915); - 
intel_fb_view_init(&fb->normal_view, I915_GGTT_VIEW_NORMAL); + intel_fb_view_init(i915, &fb->normal_view, I915_GGTT_VIEW_NORMAL); drm_WARN_ON(&i915->drm, intel_fb_supports_90_270_rotation(fb) && intel_fb_needs_pot_stride_remap(fb)); if (intel_fb_supports_90_270_rotation(fb)) - intel_fb_view_init(&fb->rotated_view, I915_GGTT_VIEW_ROTATED); + intel_fb_view_init(i915, &fb->rotated_view, I915_GGTT_VIEW_ROTATED); if (intel_fb_needs_pot_stride_remap(fb)) - intel_fb_view_init(&fb->remapped_view, I915_GGTT_VIEW_REMAPPED); + intel_fb_view_init(i915, &fb->remapped_view, I915_GGTT_VIEW_REMAPPED); for (i = 0; i < num_planes; i++) { struct fb_plane_view_dims view_dims; @@ -815,7 +1527,9 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * */ fb->normal_view.color_plane[i].x = x; fb->normal_view.color_plane[i].y = y; - fb->normal_view.color_plane[i].stride = fb->base.pitches[i]; + fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i]; + fb->normal_view.color_plane[i].scanout_stride = + fb->normal_view.color_plane[i].mapping_stride; offset = calc_plane_aligned_offset(fb, i, &x, &y); @@ -856,7 +1570,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state) unsigned int src_w, src_h; u32 gtt_offset = 0; - intel_fb_view_init(&plane_state->view, + intel_fb_view_init(i915, &plane_state->view, drm_rotation_90_or_270(rotation) ? I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED); @@ -865,7 +1579,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state) src_w = drm_rect_width(&plane_state->uapi.src) >> 16; src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - drm_WARN_ON(&i915->drm, is_ccs_modifier(fb->modifier)); + drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier)); /* Make src coordinates relative to the viewport */ drm_rect_translate(&plane_state->uapi.src, @@ -918,6 +1632,71 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio *view = fb->normal_view; } +static +u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, + u32 pixel_format, u64 modifier) +{ + /* + * Arbitrary limit for gen4+ chosen to match the + * render engine max stride. + * + * The new CCS hash mode makes remapping impossible + */ + if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) || + intel_modifier_uses_dpt(dev_priv, modifier)) + return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); + else if (DISPLAY_VER(dev_priv) >= 7) + return 256 * 1024; + else + return 128 * 1024; +} + +static u32 +intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) +{ + struct drm_i915_private *dev_priv = to_i915(fb->dev); + u32 tile_width; + + if (is_surface_linear(fb, color_plane)) { + u32 max_stride = intel_plane_fb_max_stride(dev_priv, + fb->format->format, + fb->modifier); + + /* + * To make remapping with linear generally feasible + * we need the stride to be page aligned. + */ + if (fb->pitches[color_plane] > max_stride && + !intel_fb_is_ccs_modifier(fb->modifier)) + return intel_tile_size(dev_priv); + else + return 64; + } + + tile_width = intel_tile_width_bytes(fb, color_plane); + if (intel_fb_is_ccs_modifier(fb->modifier)) { + /* + * On TGL the surface stride must be 4 tile aligned, mapped by + * one 64 byte cacheline on the CCS AUX surface. 
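 *
 * As an illustration (assuming a Y-tiled main surface, whose tiles
 * are 128 bytes wide), this works out to a 4 * 128 = 512 byte stride
 * alignment.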
+ */ + if (DISPLAY_VER(dev_priv) >= 12) + tile_width *= 4; + /* + * Display WA #0531: skl,bxt,kbl,glk + * + * Render decompression and plane width > 3840 + * combined with horizontal panning requires the + * plane stride to be a multiple of 4. We'll just + * require the entire fb to accommodate that to avoid + * potential runtime errors at plane configuration time. + */ + else if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) && + color_plane == 0 && fb->width > 3840) + tile_width *= 4; + } + return tile_width; +} + static int intel_plane_check_stride(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); @@ -936,7 +1715,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state) return 0; /* FIXME other color planes? */ - stride = plane_state->view.color_plane[0].stride; + stride = plane_state->view.color_plane[0].mapping_stride; max_stride = plane->max_stride(plane, fb->format->format, fb->modifier, rotation); @@ -981,3 +1760,257 @@ int intel_plane_compute_gtt(struct intel_plane_state *plane_state) return intel_plane_check_stride(plane_state); } + +static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + + drm_framebuffer_cleanup(fb); + + if (intel_fb_uses_dpt(fb)) + intel_dpt_destroy(intel_fb->dpt_vm); + + intel_frontbuffer_put(intel_fb->frontbuffer); + + kfree(intel_fb); +} + +static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, + struct drm_file *file, + unsigned int *handle) +{ + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (i915_gem_object_is_userptr(obj)) { + drm_dbg(&i915->drm, + "attempting to use a userptr for a framebuffer, denied\n"); + return -EINVAL; + } + + return drm_gem_handle_create(file, &obj->base, handle); +} + +static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, + struct drm_file *file, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) +{ + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + + i915_gem_object_flush_if_display(obj); + intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); + + return 0; +} + +static const struct drm_framebuffer_funcs intel_fb_funcs = { + .destroy = intel_user_framebuffer_destroy, + .create_handle = intel_user_framebuffer_create_handle, + .dirty = intel_user_framebuffer_dirty, +}; + +int intel_framebuffer_init(struct intel_framebuffer *intel_fb, + struct drm_i915_gem_object *obj, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct drm_framebuffer *fb = &intel_fb->base; + u32 max_stride; + unsigned int tiling, stride; + int ret = -EINVAL; + int i; + + intel_fb->frontbuffer = intel_frontbuffer_get(obj); + if (!intel_fb->frontbuffer) + return -ENOMEM; + + i915_gem_object_lock(obj, NULL); + tiling = i915_gem_object_get_tiling(obj); + stride = i915_gem_object_get_stride(obj); + i915_gem_object_unlock(obj); + + if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { + /* + * If there's a fence, enforce that + * the fb modifier and tiling mode match. 
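 *
 * E.g. an object fenced as I915_TILING_X can only back an
 * I915_FORMAT_MOD_X_TILED framebuffer here.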
+ */ + if (tiling != I915_TILING_NONE && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { + drm_dbg_kms(&dev_priv->drm, + "tiling_mode doesn't match fb modifier\n"); + goto err; + } + } else { + if (tiling == I915_TILING_X) { + mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; + } else if (tiling == I915_TILING_Y) { + drm_dbg_kms(&dev_priv->drm, + "No Y tiling for legacy addfb\n"); + goto err; + } + } + + if (!drm_any_plane_has_format(&dev_priv->drm, + mode_cmd->pixel_format, + mode_cmd->modifier[0])) { + drm_dbg_kms(&dev_priv->drm, + "unsupported pixel format %p4cc / modifier 0x%llx\n", + &mode_cmd->pixel_format, mode_cmd->modifier[0]); + goto err; + } + + /* + * gen2/3 display engine uses the fence if present, + * so the tiling mode must match the fb modifier exactly. + */ + if (DISPLAY_VER(dev_priv) < 4 && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { + drm_dbg_kms(&dev_priv->drm, + "tiling_mode must match fb modifier exactly on gen2/3\n"); + goto err; + } + + max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, + mode_cmd->modifier[0]); + if (mode_cmd->pitches[0] > max_stride) { + drm_dbg_kms(&dev_priv->drm, + "%s pitch (%u) must be at most %d\n", + mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? + "tiled" : "linear", + mode_cmd->pitches[0], max_stride); + goto err; + } + + /* + * If there's a fence, enforce that + * the fb pitch and fence stride match. + */ + if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { + drm_dbg_kms(&dev_priv->drm, + "pitch (%d) must match tiling stride (%d)\n", + mode_cmd->pitches[0], stride); + goto err; + } + + /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ + if (mode_cmd->offsets[0] != 0) { + drm_dbg_kms(&dev_priv->drm, + "plane 0 offset (0x%08x) must be 0\n", + mode_cmd->offsets[0]); + goto err; + } + + drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); + + for (i = 0; i < fb->format->num_planes; i++) { + u32 stride_alignment; + + if (mode_cmd->handles[i] != mode_cmd->handles[0]) { + drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n", + i); + goto err; + } + + stride_alignment = intel_fb_stride_alignment(fb, i); + if (fb->pitches[i] & (stride_alignment - 1)) { + drm_dbg_kms(&dev_priv->drm, + "plane %d pitch (%d) must be at least %u byte aligned\n", + i, fb->pitches[i], stride_alignment); + goto err; + } + + if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) { + int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i); + + if (fb->pitches[i] != ccs_aux_stride) { + drm_dbg_kms(&dev_priv->drm, + "ccs aux plane %d pitch (%d) must be %d\n", + i, + fb->pitches[i], ccs_aux_stride); + goto err; + } + } + + fb->obj[i] = &obj->base; + } + + ret = intel_fill_fb_info(dev_priv, intel_fb); + if (ret) + goto err; + + if (intel_fb_uses_dpt(fb)) { + struct i915_address_space *vm; + + vm = intel_dpt_create(intel_fb); + if (IS_ERR(vm)) { + ret = PTR_ERR(vm); + goto err; + } + + intel_fb->dpt_vm = vm; + } + + ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); + if (ret) { + drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); + goto err; + } + + return 0; + +err: + intel_frontbuffer_put(intel_fb->frontbuffer); + return ret; +} + +struct drm_framebuffer * +intel_user_framebuffer_create(struct drm_device *dev, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *user_mode_cmd) +{ + struct drm_framebuffer *fb; + struct drm_i915_gem_object *obj; + struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; + struct drm_i915_private *i915; + + obj = i915_gem_object_lookup(filp, 
mode_cmd.handles[0]); + if (!obj) + return ERR_PTR(-ENOENT); + + /* object is backed with LMEM for discrete */ + i915 = to_i915(obj->base.dev); + if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) { + /* object is "remote", not in local memory */ + i915_gem_object_put(obj); + return ERR_PTR(-EREMOTE); + } + + fb = intel_framebuffer_create(obj, &mode_cmd); + i915_gem_object_put(obj); + + return fb; +} + +struct drm_framebuffer * +intel_framebuffer_create(struct drm_i915_gem_object *obj, + struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct intel_framebuffer *intel_fb; + int ret; + + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + if (!intel_fb) + return ERR_PTR(-ENOMEM); + + ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); + if (ret) + goto err; + + return &intel_fb->base; + +err: + kfree(intel_fb); + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 739d1b91754b..ba9df8986c1e 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -6,20 +6,45 @@ #ifndef __INTEL_FB_H__ #define __INTEL_FB_H__ +#include <linux/bits.h> #include <linux/types.h> +struct drm_device; +struct drm_file; struct drm_framebuffer; - +struct drm_i915_gem_object; struct drm_i915_private; - +struct drm_mode_fb_cmd2; struct intel_fb_view; struct intel_framebuffer; +struct intel_plane; struct intel_plane_state; -bool is_ccs_plane(const struct drm_framebuffer *fb, int plane); -bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane); -bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane); -bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane); +#define INTEL_PLANE_CAP_NONE 0 +#define INTEL_PLANE_CAP_CCS_RC BIT(0) +#define INTEL_PLANE_CAP_CCS_RC_CC BIT(1) +#define INTEL_PLANE_CAP_CCS_MC BIT(2) +#define INTEL_PLANE_CAP_TILING_X BIT(3) +#define INTEL_PLANE_CAP_TILING_Y BIT(4) +#define INTEL_PLANE_CAP_TILING_Yf BIT(5) + +bool intel_fb_is_ccs_modifier(u64 modifier); +bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); +bool intel_fb_is_mc_ccs_modifier(u64 modifier); + +bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); +int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); + +u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, + u8 plane_caps); +bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier); + +const struct drm_format_info * +intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd); + +bool +intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + u64 modifier); bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane); @@ -28,10 +53,14 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane); int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane); unsigned int intel_tile_size(const struct drm_i915_private *i915); +unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane); unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane); unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane); - +unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, + int color_plane, unsigned int height); unsigned int intel_cursor_alignment(const struct drm_i915_private *i915); +unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, + int color_plane); void 
intel_fb_plane_get_subsampling(int *hsub, int *vsub, const struct drm_framebuffer *fb, @@ -53,4 +82,14 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio struct intel_fb_view *view); int intel_plane_compute_gtt(struct intel_plane_state *plane_state); +int intel_framebuffer_init(struct intel_framebuffer *ifb, + struct drm_i915_gem_object *obj, + struct drm_mode_fb_cmd2 *mode_cmd); +struct drm_framebuffer * +intel_user_framebuffer_create(struct drm_device *dev, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *user_mode_cmd); + +bool intel_fb_uses_dpt(const struct drm_framebuffer *fb); + #endif /* __INTEL_FB_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c new file mode 100644 index 000000000000..31c15e5fca95 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +/** + * DOC: display pinning helpers + */ + +#include "gem/i915_gem_object.h" + +#include "i915_drv.h" +#include "intel_display_types.h" +#include "intel_dpt.h" +#include "intel_fb.h" +#include "intel_fb_pin.h" + +static struct i915_vma * +intel_pin_fb_obj_dpt(struct drm_framebuffer *fb, + const struct i915_ggtt_view *view, + bool uses_fence, + unsigned long *out_flags, + struct i915_address_space *vm) +{ + struct drm_device *dev = fb->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct i915_vma *vma; + u32 alignment; + int ret; + + if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) + return ERR_PTR(-EINVAL); + + alignment = 4096 * 512; + + atomic_inc(&dev_priv->gpu_error.pending_fb_pin); + + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); + if (ret) { + vma = ERR_PTR(ret); + goto err; + } + + vma = i915_vma_instance(obj, vm, view); + if (IS_ERR(vma)) + goto err; + + if (i915_vma_misplaced(vma, 0, alignment, 0)) { + ret = i915_vma_unbind(vma); + if (ret) { + vma = ERR_PTR(ret); + goto err; + } + } + + ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL); + if (ret) { + vma = ERR_PTR(ret); + goto err; + } + + vma->display_alignment = max_t(u64, vma->display_alignment, alignment); + + i915_gem_object_flush_if_display(obj); + + i915_vma_get(vma); +err: + atomic_dec(&dev_priv->gpu_error.pending_fb_pin); + + return vma; +} + +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, + bool phys_cursor, + const struct i915_ggtt_view *view, + bool uses_fence, + unsigned long *out_flags) +{ + struct drm_device *dev = fb->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + intel_wakeref_t wakeref; + struct i915_gem_ww_ctx ww; + struct i915_vma *vma; + unsigned int pinctl; + u32 alignment; + int ret; + + if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj))) + return ERR_PTR(-EINVAL); + + if (phys_cursor) + alignment = intel_cursor_alignment(dev_priv); + else + alignment = intel_surf_alignment(fb, 0); + if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment))) + return ERR_PTR(-EINVAL); + + /* Note that the w/a also requires 64 PTE of padding following the + * bo. We currently fill all unused PTE with the shadow page and so + * we should always have valid PTE following the scanout preventing + * the VT-d warning. 
+ */ + if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) + alignment = 256 * 1024; + + /* + * Global gtt pte registers are special registers which actually forward + * writes to a chunk of system memory, which means that there is no risk + * that the register values disappear as soon as we call + * intel_runtime_pm_put(), so it is correct to wrap only the + * pin/unpin/fence and not more. + */ + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + atomic_inc(&dev_priv->gpu_error.pending_fb_pin); + + /* + * Valleyview is definitely limited to scanning out the first + * 512MiB. Let's presume this behaviour was inherited from the + * g4x display engine and that all earlier gen are similarly + * limited. Testing suggests that it is a little more + * complicated than this. For example, Cherryview appears quite + * happy to scan out from anywhere within its global aperture. + */ + pinctl = 0; + if (HAS_GMCH(dev_priv)) + pinctl |= PIN_MAPPABLE; + + i915_gem_ww_ctx_init(&ww, true); +retry: + ret = i915_gem_object_lock(obj, &ww); + if (!ret && phys_cursor) + ret = i915_gem_object_attach_phys(obj, alignment); + else if (!ret && HAS_LMEM(dev_priv)) + ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM); + /* TODO: Do we need to sync when migration becomes async? */ + if (!ret) + ret = i915_gem_object_pin_pages(obj); + if (ret) + goto err; + + vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, + view, pinctl); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unpin; + } + + if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { + /* + * Install a fence for tiled scan-out. Pre-i965 always needs a + * fence, whereas 965+ only requires a fence if using + * framebuffer compression. For simplicity, we always, when + * possible, install a fence as the cost is not that onerous. + * + * If we fail to fence the tiled scanout, then either the + * modeset will reject the change (which is highly unlikely as + * the affected systems, all but one, do not have unmappable + * space) or we will not be able to enable full powersaving + * techniques (also likely not to apply due to various limits + * FBC and the like impose on the size of the buffer, which + * presumably we violated anyway with this unmappable buffer). + * Anyway, it is presumably better to stumble onwards with + * something and try to run the system in a "less than optimal" + * mode that matches the user configuration. 
+ */ + ret = i915_vma_pin_fence(vma); + if (ret != 0 && DISPLAY_VER(dev_priv) < 4) { + i915_vma_unpin(vma); + goto err_unpin; + } + ret = 0; + + if (vma->fence) + *out_flags |= PLANE_HAS_FENCE; + } + + i915_vma_get(vma); + +err_unpin: + i915_gem_object_unpin_pages(obj); +err: + if (ret == -EDEADLK) { + ret = i915_gem_ww_ctx_backoff(&ww); + if (!ret) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + if (ret) + vma = ERR_PTR(ret); + + atomic_dec(&dev_priv->gpu_error.pending_fb_pin); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + return vma; +} + +void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) +{ + if (flags & PLANE_HAS_FENCE) + i915_vma_unpin_fence(vma); + i915_vma_unpin(vma); + i915_vma_put(vma); +} + +int intel_plane_pin_fb(struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct drm_framebuffer *fb = plane_state->hw.fb; + struct i915_vma *vma; + bool phys_cursor = + plane->id == PLANE_CURSOR && + INTEL_INFO(dev_priv)->display.cursor_needs_physical; + + if (!intel_fb_uses_dpt(fb)) { + vma = intel_pin_and_fence_fb_obj(fb, phys_cursor, + &plane_state->view.gtt, + intel_plane_uses_fence(plane_state), + &plane_state->flags); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + plane_state->ggtt_vma = vma; + } else { + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + + vma = intel_dpt_pin(intel_fb->dpt_vm); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + plane_state->ggtt_vma = vma; + + vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false, + &plane_state->flags, intel_fb->dpt_vm); + if (IS_ERR(vma)) { + intel_dpt_unpin(intel_fb->dpt_vm); + plane_state->ggtt_vma = NULL; + return PTR_ERR(vma); + } + + plane_state->dpt_vma = vma; + + WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma); + } + + return 0; +} + +void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) +{ + struct drm_framebuffer *fb = old_plane_state->hw.fb; + struct i915_vma *vma; + + if (!intel_fb_uses_dpt(fb)) { + vma = fetch_and_zero(&old_plane_state->ggtt_vma); + if (vma) + intel_unpin_fb_vma(vma, old_plane_state->flags); + } else { + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + + vma = fetch_and_zero(&old_plane_state->dpt_vma); + if (vma) + intel_unpin_fb_vma(vma, old_plane_state->flags); + + vma = fetch_and_zero(&old_plane_state->ggtt_vma); + if (vma) + intel_dpt_unpin(intel_fb->dpt_vm); + } +} diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h new file mode 100644 index 000000000000..e4fcd0218d9d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_FB_PIN_H__ +#define __INTEL_FB_PIN_H__ + +#include <linux/types.h> + +struct drm_framebuffer; +struct i915_vma; +struct intel_plane_state; +struct i915_ggtt_view; + +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, + bool phys_cursor, + const struct i915_ggtt_view *view, + bool uses_fence, + unsigned long *out_flags); + +void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags); + +int intel_plane_pin_fb(struct intel_plane_state *plane_state); +void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index ddfc17e21668..8be01b93015f 100644 --- 
a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -41,124 +41,331 @@ #include <drm/drm_fourcc.h> #include "i915_drv.h" -#include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_cdclk.h" #include "intel_de.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" -/* - * For SKL+, the plane source size used by the hardware is based on the value we - * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value - * we wrote to PIPESRC. - */ -static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache, - int *width, int *height) +struct intel_fbc_funcs { + void (*activate)(struct intel_fbc *fbc); + void (*deactivate)(struct intel_fbc *fbc); + bool (*is_active)(struct intel_fbc *fbc); + bool (*is_compressing)(struct intel_fbc *fbc); + void (*nuke)(struct intel_fbc *fbc); + void (*program_cfb)(struct intel_fbc *fbc); + void (*set_false_color)(struct intel_fbc *fbc, bool enable); +}; + +struct intel_fbc_state { + struct intel_plane *plane; + unsigned int cfb_stride; + unsigned int cfb_size; + unsigned int fence_y_offset; + u16 override_cfb_stride; + u16 interval; + s8 fence_id; +}; + +struct intel_fbc { + struct drm_i915_private *i915; + const struct intel_fbc_funcs *funcs; + + /* + * This is always the inner lock when overlapping with + * struct_mutex and it's the outer lock when overlapping + * with stolen_lock. + */ + struct mutex lock; + unsigned int possible_framebuffer_bits; + unsigned int busy_bits; + + struct drm_mm_node compressed_fb; + struct drm_mm_node compressed_llb; + + u8 limit; + + bool false_color; + + bool active; + bool activated; + bool flip_pending; + + bool underrun_detected; + struct work_struct underrun_work; + + /* + * This structure contains everything that's relevant to program the + * hardware registers. When we want to figure out if we need to disable + * and re-enable FBC for a new configuration we just check if there's + * something different in the struct. The genx_fbc_activate functions + * are supposed to read from it in order to program the registers. 
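 *
 * As a rough illustration (an unrotated 1920x1080 XRGB8888 plane
 * with a 7680 byte pitch on a DISPLAY_VER 7 part), the cached values
 * would be cfb_stride = 1920 * 4 = 7680 and cfb_size = 1080 * 7680;
 * a flip to an identical configuration can then leave FBC active.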
+ */ + struct intel_fbc_state state; + const char *no_fbc_reason; +}; + +/* plane stride in pixels */ +static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state) { - if (width) - *width = cache->plane.src_w; - if (height) - *height = cache->plane.src_h; + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int stride; + + stride = plane_state->view.color_plane[0].mapping_stride; + if (!drm_rotation_90_or_270(plane_state->hw.rotation)) + stride /= fb->format->cpp[0]; + + return stride; } -static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, - const struct intel_fbc_state_cache *cache) +/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */ +static unsigned int _intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) { - int lines; + unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ - intel_fbc_get_plane_source_size(cache, NULL, &lines); - if (DISPLAY_VER(dev_priv) == 7) + return intel_fbc_plane_stride(plane_state) * cpp; +} + +/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */ +static unsigned int skl_fbc_min_cfb_stride(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + unsigned int limit = 4; /* 1:4 compression limit is the worst case */ + unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ + unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16; + unsigned int height = 4; /* FBC segment is 4 lines */ + unsigned int stride; + + /* minimum segment stride we can use */ + stride = width * cpp * height / limit; + + /* + * Wa_16011863758: icl+ + * Avoid some hardware segment address miscalculation. + */ + if (DISPLAY_VER(i915) >= 11) + stride += 64; + + /* + * At least some of the platforms require each 4 line segment to + * be 512 byte aligned. Just do it always for simplicity. + */ + stride = ALIGN(stride, 512); + + /* convert back to single line equivalent with 1:1 compression limit */ + return stride * limit / height; +} + +/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */ +static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + unsigned int stride = _intel_fbc_cfb_stride(plane_state); + + /* + * At least some of the platforms require each 4 line segment to + * be 512 byte aligned. Aligning each line to 512 bytes guarantees + * that regardless of the compression limit we choose later. + */ + if (DISPLAY_VER(i915) >= 9) + return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(plane_state)); + else + return stride; +} + +static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + int lines = drm_rect_height(&plane_state->uapi.src) >> 16; + + if (DISPLAY_VER(i915) == 7) lines = min(lines, 2048); - else if (DISPLAY_VER(dev_priv) >= 8) + else if (DISPLAY_VER(i915) >= 8) lines = min(lines, 2560); - /* Hardware needs the full buffer stride, not just the active area. 
*/ - return lines * cache->fb.stride; + return lines * intel_fbc_cfb_stride(plane_state); +} + +static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state); + unsigned int stride = _intel_fbc_cfb_stride(plane_state); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + /* + * Override stride in 64 byte units per 4 line segment. + * + * Gen9 hw miscalculates cfb stride for linear as + * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so + * we always need to use the override there. + */ + if (stride != stride_aligned || + (DISPLAY_VER(i915) == 9 && fb->modifier == DRM_FORMAT_MOD_LINEAR)) + return stride_aligned * 4 / 64; + + return 0; } -static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) +static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) { + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + unsigned int cfb_stride; + u32 fbc_ctl; + + cfb_stride = fbc_state->cfb_stride / fbc->limit; + + /* FBC_CTL wants 32B or 64B units */ + if (DISPLAY_VER(i915) == 2) + cfb_stride = (cfb_stride / 32) - 1; + else + cfb_stride = (cfb_stride / 64) - 1; + + fbc_ctl = FBC_CTL_PERIODIC | + FBC_CTL_INTERVAL(fbc_state->interval) | + FBC_CTL_STRIDE(cfb_stride); + + if (IS_I945GM(i915)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + + if (fbc_state->fence_id >= 0) + fbc_ctl |= FBC_CTL_FENCENO(fbc_state->fence_id); + + return fbc_ctl; +} + +static u32 i965_fbc_ctl2(struct intel_fbc *fbc) +{ + const struct intel_fbc_state *fbc_state = &fbc->state; + u32 fbc_ctl2; + + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | + FBC_CTL_PLANE(fbc_state->plane->i9xx_plane); + + if (fbc_state->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN; + + return fbc_ctl2; +} + +static void i8xx_fbc_deactivate(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; u32 fbc_ctl; /* Disable compression */ - fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + fbc_ctl = intel_de_read(i915, FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) return; fbc_ctl &= ~FBC_CTL_EN; - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + intel_de_write(i915, FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, + if (intel_de_wait_for_clear(i915, FBC_STATUS, FBC_STAT_COMPRESSING, 10)) { - drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n"); + drm_dbg_kms(&i915->drm, "FBC idle timed out\n"); return; } } -static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) +static void i8xx_fbc_activate(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - int cfb_pitch; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; int i; - u32 fbc_ctl; - - /* Note: fbc.limit == 1 for i8xx */ - cfb_pitch = params->cfb_size / FBC_LL_SIZE; - if (params->fb.stride < cfb_pitch) - cfb_pitch = params->fb.stride; - - /* FBC_CTL wants 32B or 64B units */ - if (DISPLAY_VER(dev_priv) == 2) - cfb_pitch = (cfb_pitch / 32) - 1; - else - cfb_pitch = (cfb_pitch / 64) - 1; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) - intel_de_write(dev_priv, FBC_TAG(i), 0); - - if (DISPLAY_VER(dev_priv) == 4) { - u32 fbc_ctl2; - - /* Set it up... 
*/ - fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; - fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); - if (params->fence_id >= 0) - fbc_ctl2 |= FBC_CTL_CPU_FENCE; - intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); - intel_de_write(dev_priv, FBC_FENCE_OFF, - params->fence_y_offset); + intel_de_write(i915, FBC_TAG(i), 0); + + if (DISPLAY_VER(i915) == 4) { + intel_de_write(i915, FBC_CONTROL2, + i965_fbc_ctl2(fbc)); + intel_de_write(i915, FBC_FENCE_OFF, + fbc_state->fence_y_offset); } - /* enable it... */ - fbc_ctl = FBC_CTL_INTERVAL(params->interval); - fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; - if (IS_I945GM(dev_priv)) - fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ - fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff); - if (params->fence_id >= 0) - fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + intel_de_write(i915, FBC_CONTROL, + FBC_CTL_EN | i8xx_fbc_ctl(fbc)); +} + +static bool i8xx_fbc_is_active(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN; +} + +static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, FBC_STATUS) & + (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); +} + +static void i8xx_fbc_nuke(struct intel_fbc *fbc) +{ + struct intel_fbc_state *fbc_state = &fbc->state; + enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; + struct drm_i915_private *dev_priv = fbc->i915; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), + intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); + spin_unlock_irq(&dev_priv->uncore.lock); } -static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; + struct drm_i915_private *i915 = fbc->i915; + + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, + fbc->compressed_fb.start, U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, + fbc->compressed_llb.start, U32_MAX)); + + intel_de_write(i915, FBC_CFB_BASE, + i915->dsm.start + fbc->compressed_fb.start); + intel_de_write(i915, FBC_LL_BASE, + i915->dsm.start + fbc->compressed_llb.start); } -static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) +static const struct intel_fbc_funcs i8xx_fbc_funcs = { + .activate = i8xx_fbc_activate, + .deactivate = i8xx_fbc_deactivate, + .is_active = i8xx_fbc_is_active, + .is_compressing = i8xx_fbc_is_compressing, + .nuke = i8xx_fbc_nuke, + .program_cfb = i8xx_fbc_program_cfb, +}; + +static void i965_fbc_nuke(struct intel_fbc *fbc) { - const struct intel_fbc_reg_params *params = &i915->fbc.params; - int limit = i915->fbc.limit; + struct intel_fbc_state *fbc_state = &fbc->state; + enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane; + struct drm_i915_private *dev_priv = fbc->i915; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), + intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); + spin_unlock_irq(&dev_priv->uncore.lock); +} - if (params->fb.format->cpp[0] == 2) - limit <<= 1; +static const struct intel_fbc_funcs i965_fbc_funcs = { + .activate = i8xx_fbc_activate, + .deactivate = i8xx_fbc_deactivate, + .is_active = i8xx_fbc_is_active, + .is_compressing = i8xx_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = i8xx_fbc_program_cfb, +}; - switch (limit) { +static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) +{ + switch (fbc->limit) { default: - MISSING_CASE(limit); + 
MISSING_CASE(fbc->limit); fallthrough; case 1: return DPFC_CTL_LIMIT_1X; @@ -169,248 +376,306 @@ static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) } } -static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | + DPFC_CTL_PLANE_G4X(fbc_state->plane->i9xx_plane); - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); + if (IS_G4X(i915)) + dpfc_ctl |= DPFC_CTL_SR_EN; - if (params->fence_id >= 0) { - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; - intel_de_write(dev_priv, DPFC_FENCE_YOFF, - params->fence_y_offset); - } else { - intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + if (fbc_state->fence_id >= 0) { + dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; + + if (DISPLAY_VER(i915) < 6) + dpfc_ctl |= DPFC_CTL_FENCENO(fbc_state->fence_id); } - /* enable it... */ - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + return dpfc_ctl; +} + +static void g4x_fbc_activate(struct intel_fbc *fbc) +{ + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, DPFC_FENCE_YOFF, + fbc_state->fence_y_offset); + + intel_de_write(i915, DPFC_CONTROL, + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } -static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +static void g4x_fbc_deactivate(struct intel_fbc *fbc) { + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + dpfc_ctl = intel_de_read(i915, DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + intel_de_write(i915, DPFC_CONTROL, dpfc_ctl); } } -static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +static bool g4x_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN; } -static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv) +static bool g4x_fbc_is_compressing(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK; +} - spin_lock_irq(&dev_priv->uncore.lock); - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), - intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); - spin_unlock_irq(&dev_priv->uncore.lock); +static void g4x_fbc_program_cfb(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start); } -static void i965_fbc_recompress(struct drm_i915_private *dev_priv) +static const struct intel_fbc_funcs g4x_fbc_funcs = { + .activate = g4x_fbc_activate, + .deactivate = g4x_fbc_deactivate, + .is_active = g4x_fbc_is_active, + .is_compressing = g4x_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = g4x_fbc_program_cfb, +}; + +static void ilk_fbc_activate(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; - spin_lock_irq(&dev_priv->uncore.lock); - 
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), - intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); - spin_unlock_irq(&dev_priv->uncore.lock); + intel_de_write(i915, ILK_DPFC_FENCE_YOFF, + fbc_state->fence_y_offset); + + intel_de_write(i915, ILK_DPFC_CONTROL, + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } -/* This function forces a CFB recompression through the nuke operation. */ -static void snb_fbc_recompress(struct drm_i915_private *dev_priv) +static void ilk_fbc_deactivate(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; + u32 dpfc_ctl; - trace_intel_fbc_nuke(fbc->crtc); + /* Disable compression */ + dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl); + } +} - intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); - intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); +static bool ilk_fbc_is_active(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN; } -static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) { - if (DISPLAY_VER(dev_priv) >= 6) - snb_fbc_recompress(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 4) - i965_fbc_recompress(dev_priv); - else - i8xx_fbc_recompress(dev_priv); + return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK; } -static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +static void ilk_fbc_program_cfb(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - u32 dpfc_ctl; + struct drm_i915_private *i915 = fbc->i915; - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start); +} - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); +static const struct intel_fbc_funcs ilk_fbc_funcs = { + .activate = ilk_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ilk_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, +}; - if (params->fence_id >= 0) { - dpfc_ctl |= DPFC_CTL_FENCE_EN; - if (IS_IRONLAKE(dev_priv)) - dpfc_ctl |= params->fence_id; - if (IS_SANDYBRIDGE(dev_priv)) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fence_id); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->fence_y_offset); - } - } else { - if (IS_SANDYBRIDGE(dev_priv)) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); - } - } +static void snb_fbc_program_fence(struct intel_fbc *fbc) +{ + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + u32 ctl = 0; - intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, - params->fence_y_offset); - /* enable it... 
*/ - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + if (fbc_state->fence_id >= 0) + ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(fbc_state->fence_id); - intel_fbc_recompress(dev_priv); + intel_de_write(i915, SNB_DPFC_CTL_SA, ctl); + intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, fbc_state->fence_y_offset); } -static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +static void snb_fbc_activate(struct intel_fbc *fbc) { - u32 dpfc_ctl; + snb_fbc_program_fence(fbc); - /* Disable compression */ - dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); - if (dpfc_ctl & DPFC_CTL_EN) { - dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); - } + ilk_fbc_activate(fbc); } -static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +static void snb_fbc_nuke(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(i915, MSG_FBC_REND_STATE); } -static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +static const struct intel_fbc_funcs snb_fbc_funcs = { + .activate = snb_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ilk_fbc_is_compressing, + .nuke = snb_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, +}; + +static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - u32 dpfc_ctl; + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + u32 val = 0; + + if (fbc_state->override_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | + FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); + + intel_de_write(i915, GLK_FBC_STRIDE, val); +} + +static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) +{ + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + u32 val = 0; /* Display WA #0529: skl, kbl, bxt. 
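 *
 * The override is in 64 byte units per 4 line segment; e.g. on gen9
 * with a linear fb, a 7680 byte cfb stride at a 1:1 compression
 * limit is programmed as 7680 * 4 / 64 = 480.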
*/ - if (DISPLAY_VER(dev_priv) == 9) { - u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4); + if (fbc_state->override_cfb_stride) + val |= CHICKEN_FBC_STRIDE_OVERRIDE | + CHICKEN_FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); - val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); + intel_de_rmw(i915, CHICKEN_MISC_4, + CHICKEN_FBC_STRIDE_OVERRIDE | + CHICKEN_FBC_STRIDE_MASK, val); +} - if (params->gen9_wa_cfb_stride) - val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; +static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) +{ + const struct intel_fbc_state *fbc_state = &fbc->state; + struct drm_i915_private *i915 = fbc->i915; + u32 dpfc_ctl; - intel_de_write(dev_priv, CHICKEN_MISC_4, val); - } + dpfc_ctl = g4x_dpfc_ctl_limit(fbc); - dpfc_ctl = 0; - if (IS_IVYBRIDGE(dev_priv)) - dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); - - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); - - if (params->fence_id >= 0) { - dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fence_id); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->fence_y_offset); - } else if (dev_priv->ggtt.num_fences) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); - } + if (IS_IVYBRIDGE(i915)) + dpfc_ctl |= DPFC_CTL_PLANE_IVB(fbc_state->plane->i9xx_plane); - if (dev_priv->fbc.false_color) - dpfc_ctl |= FBC_CTL_FALSE_COLOR; + if (fbc_state->fence_id >= 0) + dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + if (fbc->false_color) + dpfc_ctl |= DPFC_CTL_FALSE_COLOR; - intel_fbc_recompress(dev_priv); + return dpfc_ctl; } -static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +static void ivb_fbc_activate(struct intel_fbc *fbc) { - if (DISPLAY_VER(dev_priv) >= 5) - return ilk_fbc_is_active(dev_priv); - else if (IS_GM45(dev_priv)) - return g4x_fbc_is_active(dev_priv); - else - return i8xx_fbc_is_active(dev_priv); + struct drm_i915_private *i915 = fbc->i915; + + if (DISPLAY_VER(i915) >= 10) + glk_fbc_program_cfb_stride(fbc); + else if (DISPLAY_VER(i915) == 9) + skl_fbc_program_cfb_stride(fbc); + + if (i915->ggtt.num_fences) + snb_fbc_program_fence(fbc); + + intel_de_write(i915, ILK_DPFC_CONTROL, + DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); +} + +static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB; } -static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +static void ivb_fbc_set_false_color(struct intel_fbc *fbc, + bool enable) { - struct intel_fbc *fbc = &dev_priv->fbc; + intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL, + DPFC_CTL_FALSE_COLOR, enable ? 
DPFC_CTL_FALSE_COLOR : 0); +} - trace_intel_fbc_activate(fbc->crtc); +static const struct intel_fbc_funcs ivb_fbc_funcs = { + .activate = ivb_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ivb_fbc_is_compressing, + .nuke = snb_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, + .set_false_color = ivb_fbc_set_false_color, +}; + +static bool intel_fbc_hw_is_active(struct intel_fbc *fbc) +{ + return fbc->funcs->is_active(fbc); +} + +static void intel_fbc_hw_activate(struct intel_fbc *fbc) +{ + trace_intel_fbc_activate(fbc->state.plane); fbc->active = true; fbc->activated = true; - if (DISPLAY_VER(dev_priv) >= 7) - gen7_fbc_activate(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 5) - ilk_fbc_activate(dev_priv); - else if (IS_GM45(dev_priv)) - g4x_fbc_activate(dev_priv); - else - i8xx_fbc_activate(dev_priv); + fbc->funcs->activate(fbc); } -static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +static void intel_fbc_hw_deactivate(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - trace_intel_fbc_deactivate(fbc->crtc); + trace_intel_fbc_deactivate(fbc->state.plane); fbc->active = false; - if (DISPLAY_VER(dev_priv) >= 5) - ilk_fbc_deactivate(dev_priv); - else if (IS_GM45(dev_priv)) - g4x_fbc_deactivate(dev_priv); - else - i8xx_fbc_deactivate(dev_priv); + fbc->funcs->deactivate(fbc); } -/** - * intel_fbc_is_active - Is FBC active? - * @dev_priv: i915 device instance - * - * This function is used to verify the current state of FBC. - * - * FIXME: This should be tracked in the plane config eventually - * instead of queried at runtime for most callers. - */ -bool intel_fbc_is_active(struct drm_i915_private *dev_priv) +static bool intel_fbc_is_compressing(struct intel_fbc *fbc) { - return dev_priv->fbc.active; + return fbc->funcs->is_compressing(fbc); +} + +static void intel_fbc_nuke(struct intel_fbc *fbc) +{ + trace_intel_fbc_nuke(fbc->state.plane); + + fbc->funcs->nuke(fbc); +} + +static void intel_fbc_activate(struct intel_fbc *fbc) +{ + intel_fbc_hw_activate(fbc); + intel_fbc_nuke(fbc); + + fbc->no_fbc_reason = NULL; } -static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, - const char *reason) +static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); if (fbc->active) - intel_fbc_hw_deactivate(dev_priv); + intel_fbc_hw_deactivate(fbc); fbc->no_fbc_reason = reason; } @@ -423,7 +688,7 @@ static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915) return BIT_ULL(32); } -static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) +static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) { u64 end; @@ -431,48 +696,50 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) * reserved range size, so it always assumes the maximum (8mb) is used. * If we enable FBC using a CFB on that memory range we'll get FIFO * underruns, even if that range is not reserved by the BIOS. 
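 *
 * E.g. with a 64MiB stolen region on those platforms the usable CFB
 * range ends at 64MiB - 8MiB = 56MiB, regardless of what the BIOS
 * actually reserved.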
*/ - if (IS_BROADWELL(dev_priv) || (DISPLAY_VER(dev_priv) == 9 && - !IS_BROXTON(dev_priv))) - end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024; + if (IS_BROADWELL(i915) || + (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915))) + end = resource_size(&i915->dsm) - 8 * 1024 * 1024; else end = U64_MAX; - return min(end, intel_fbc_cfb_base_max(dev_priv)); + return min(end, intel_fbc_cfb_base_max(i915)); } -static int intel_fbc_max_limit(struct drm_i915_private *dev_priv, int fb_cpp) +static int intel_fbc_min_limit(const struct intel_plane_state *plane_state) { - /* - * FIXME: FBC1 can have arbitrary cfb stride, - * so we could support different compression ratios. - */ - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - return 1; + return plane_state->hw.fb->format->cpp[0] == 2 ? 2 : 1; +} +static int intel_fbc_max_limit(struct drm_i915_private *i915) +{ /* WaFbcOnly1to1Ratio:ctg */ - if (IS_G4X(dev_priv)) + if (IS_G4X(i915)) return 1; - /* FBC2 can only do 1:1, 1:2, 1:4 */ - return fb_cpp == 2 ? 2 : 4; + /* + * FBC2 can only do 1:1, 1:2, 1:4; we limit + * FBC1 to the same out of convenience. + */ + return 4; } -static int find_compression_limit(struct drm_i915_private *dev_priv, - unsigned int size, - unsigned int fb_cpp) +static int find_compression_limit(struct intel_fbc *fbc, + unsigned int size, int min_limit) { - struct intel_fbc *fbc = &dev_priv->fbc; - u64 end = intel_fbc_stolen_end(dev_priv); - int ret, limit = 1; + struct drm_i915_private *i915 = fbc->i915; + u64 end = intel_fbc_stolen_end(i915); + int ret, limit = min_limit; + + size /= limit; /* Try to over-allocate to reduce reallocations and fragmentation. */ - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size <<= 1, 4096, 0, end); if (ret == 0) return limit; - for (; limit <= intel_fbc_max_limit(dev_priv, fb_cpp); limit <<= 1) { - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) { + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size >>= 1, 4096, 0, end); if (ret == 0) return limit; @@ -481,35 +748,34 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, - unsigned int size, unsigned int fb_cpp) +static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, + unsigned int size, int min_limit) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; int ret; - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(&i915->drm, drm_mm_node_allocated(&fbc->compressed_fb)); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(&i915->drm, drm_mm_node_allocated(&fbc->compressed_llb)); - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { - ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb, + if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) { + ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, 4096, 4096); if (ret) goto err; } - ret = find_compression_limit(dev_priv, size, fb_cpp); + ret = find_compression_limit(fbc, size, min_limit); if (!ret) goto err_llb; - else if (ret > 1) { - drm_info_once(&dev_priv->drm, + else if (ret > min_limit) + drm_info_once(&i915->drm, "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced size would. 
Try to increase stolen memory size if available in BIOS.\n"); - } fbc->limit = ret; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", fbc->compressed_fb.size, fbc->limit); @@ -517,83 +783,69 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, err_llb: if (drm_mm_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); err: - if (drm_mm_initialized(&dev_priv->mm.stolen)) - drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + if (drm_mm_initialized(&i915->mm.stolen)) + drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } -static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv) +static void intel_fbc_program_cfb(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - if (DISPLAY_VER(dev_priv) >= 5) { - intel_de_write(dev_priv, ILK_DPFC_CB_BASE, - fbc->compressed_fb.start); - } else if (IS_GM45(dev_priv)) { - intel_de_write(dev_priv, DPFC_CB_BASE, - fbc->compressed_fb.start); - } else { - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, - fbc->compressed_fb.start, - U32_MAX)); - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, - fbc->compressed_llb.start, - U32_MAX)); - - intel_de_write(dev_priv, FBC_CFB_BASE, - dev_priv->dsm.start + fbc->compressed_fb.start); - intel_de_write(dev_priv, FBC_LL_BASE, - dev_priv->dsm.start + fbc->compressed_llb.start); - } + fbc->funcs->program_cfb(fbc); } -static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; - if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + if (WARN_ON(intel_fbc_hw_is_active(fbc))) return; if (drm_mm_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); if (drm_mm_node_allocated(&fbc->compressed_fb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); } -void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +void intel_fbc_cleanup(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; mutex_lock(&fbc->lock); - __intel_fbc_cleanup_cfb(dev_priv); + __intel_fbc_cleanup_cfb(fbc); mutex_unlock(&fbc->lock); + + kfree(fbc); } -static bool stride_is_valid(struct drm_i915_private *dev_priv, - u64 modifier, unsigned int stride) +static bool stride_is_valid(const struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int stride = intel_fbc_plane_stride(plane_state) * + fb->format->cpp[0]; + /* This should have been caught earlier. */ - if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0)) return false; /* Below are the additional FBC restrictions. 
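find_compression_limit() above implements a simple search: divide the CFB size by the minimum compression limit, try to over-allocate twice that, then keep doubling the limit (halving the size) until stolen memory can satisfy the request. A self-contained sketch of the same loop, with a stand-in allocator in place of i915_gem_stolen_insert_node_in_range() and an invented amount of free stolen memory:

#include <stdio.h>

static unsigned int stolen_free = 4096 * 1024;	/* pretend 4 MiB is free */

static int fake_stolen_alloc(unsigned int size)
{
	if (size > stolen_free)
		return -1;
	stolen_free -= size;
	return 0;
}

static int find_limit(unsigned int size, int min_limit, int max_limit)
{
	int limit = min_limit;

	size /= limit;

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	if (fake_stolen_alloc(size <<= 1) == 0)
		return limit;

	for (; limit <= max_limit; limit <<= 1) {
		if (fake_stolen_alloc(size >>= 1) == 0)
			return limit;
	}

	return 0;	/* no fit even at the maximum compression limit */
}

int main(void)
{
	unsigned int cfb = 6 * 1024 * 1024;	/* CFB larger than free stolen */

	/* 12 MiB and 6 MiB fail, 3 MiB at limit 2 fits: prints 2 */
	printf("picked limit %d\n", find_limit(cfb, 1, 4));
	return 0;
}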
*/ if (stride < 512) return false; - if (DISPLAY_VER(dev_priv) == 2 || DISPLAY_VER(dev_priv) == 3) + if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3) return stride == 4096 || stride == 8192; - if (DISPLAY_VER(dev_priv) == 4 && !IS_G4X(dev_priv) && stride < 2048) + if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048) return false; /* Display WA #1105: skl,bxt,kbl,cfl,glk */ - if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) && - modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) + if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) && + fb->modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) return false; if (stride > 16384) @@ -602,20 +854,22 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, return true; } -static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, - u32 pixel_format) +static bool pixel_format_is_valid(const struct intel_plane_state *plane_state) { - switch (pixel_format) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->format->format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: return true; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ - if (DISPLAY_VER(dev_priv) == 2) + if (DISPLAY_VER(i915) == 2) return false; /* WaFbcOnly1to1Ratio:ctg */ - if (IS_G4X(dev_priv)) + if (IS_G4X(i915)) return false; return true; default: @@ -623,13 +877,16 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, } } -static bool rotation_is_valid(struct drm_i915_private *dev_priv, - u32 pixel_format, unsigned int rotation) +static bool rotation_is_valid(const struct intel_plane_state *plane_state) { - if (DISPLAY_VER(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + + if (DISPLAY_VER(i915) >= 9 && fb->format->format == DRM_FORMAT_RGB565 && drm_rotation_90_or_270(rotation)) return false; - else if (DISPLAY_VER(dev_priv) <= 4 && !IS_G4X(dev_priv) && + else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) && rotation != DRM_MODE_ROTATE_0) return false; @@ -642,19 +899,18 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv, * the X and Y offset registers. That's why we include the src x/y offsets * instead of just looking at the plane size. 
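The stride rules in stride_is_valid() above condense to a small predicate. Here is a runnable restatement, with the platform reduced to a display version plus a G4X flag; the Display WA #1105 modifier check is omitted for brevity, so treat this as a sketch rather than the full check:

#include <assert.h>
#include <stdbool.h>

static bool stride_ok(int ver, bool is_g4x, unsigned int stride)
{
	if (stride & 63)	/* must be a multiple of 64 bytes */
		return false;
	if (stride < 512)
		return false;
	if (ver == 2 || ver == 3)
		return stride == 4096 || stride == 8192;
	if (ver == 4 && !is_g4x && stride < 2048)
		return false;
	return stride <= 16384;
}

int main(void)
{
	assert(stride_ok(9, false, 4096));
	assert(!stride_ok(2, false, 2048));	/* gen2/3: only 4k or 8k */
	assert(!stride_ok(4, false, 1024));	/* gen4 non-G4X: >= 2048 */
	assert(!stride_ok(9, false, 100));	/* unaligned and too small */
	return 0;
}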
*/ -static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); unsigned int effective_w, effective_h, max_w, max_h; - if (DISPLAY_VER(dev_priv) >= 10) { + if (DISPLAY_VER(i915) >= 10) { max_w = 5120; max_h = 4096; - } else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { max_w = 4096; max_h = 4096; - } else if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) { + } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { max_w = 4096; max_h = 2048; } else { @@ -662,235 +918,188 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) max_h = 1536; } - intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, - &effective_h); - effective_w += fbc->state_cache.plane.adjusted_x; - effective_h += fbc->state_cache.plane.adjusted_y; + effective_w = plane_state->view.color_plane[0].x + + (drm_rect_width(&plane_state->uapi.src) >> 16); + effective_h = plane_state->view.color_plane[0].y + + (drm_rect_height(&plane_state->uapi.src) >> 16); return effective_w <= max_w && effective_h <= max_h; } -static bool tiling_is_valid(struct drm_i915_private *dev_priv, - u64 modifier) +static bool tiling_is_valid(const struct intel_plane_state *plane_state) { - switch (modifier) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + + switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: - if (DISPLAY_VER(dev_priv) >= 9) - return true; - return false; - case I915_FORMAT_MOD_X_TILED: case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + return DISPLAY_VER(i915) >= 9; + case I915_FORMAT_MOD_X_TILED: return true; default: return false; } } -static void intel_fbc_update_state_cache(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static void intel_fbc_update_state(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; - struct drm_framebuffer *fb = plane_state->hw.fb; - - cache->plane.visible = plane_state->uapi.visible; - if (!cache->plane.visible) - return; - - cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; - - cache->plane.rotation = plane_state->hw.rotation; - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. 
- */
- cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- cache->plane.adjusted_x = plane_state->view.color_plane[0].x;
- cache->plane.adjusted_y = plane_state->view.color_plane[0].y;
-
- cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+ struct intel_fbc *fbc = plane->fbc;
+ struct intel_fbc_state *fbc_state = &fbc->state;

- cache->fb.format = fb->format;
- cache->fb.modifier = fb->modifier;
+ WARN_ON(plane_state->no_fbc_reason);

- /* FIXME is this correct? */
- cache->fb.stride = plane_state->view.color_plane[0].stride;
- if (drm_rotation_90_or_270(plane_state->hw.rotation))
- cache->fb.stride *= fb->format->cpp[0];
+ fbc_state->plane = plane;

 /* FBC1 compression interval: arbitrary choice of 1 second */
- cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
+ fbc_state->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);

- cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+ fbc_state->fence_y_offset = intel_plane_fence_y_offset(plane_state);

- drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE &&
 !plane_state->ggtt_vma->fence);

 if (plane_state->flags & PLANE_HAS_FENCE &&
 plane_state->ggtt_vma->fence)
- cache->fence_id = plane_state->ggtt_vma->fence->id;
+ fbc_state->fence_id = plane_state->ggtt_vma->fence->id;
 else
- cache->fence_id = -1;
+ fbc_state->fence_id = -1;

- cache->psr2_active = crtc_state->has_psr2;
+ fbc_state->cfb_stride = intel_fbc_cfb_stride(plane_state);
+ fbc_state->cfb_size = intel_fbc_cfb_size(plane_state);
+ fbc_state->override_cfb_stride = intel_fbc_override_cfb_stride(plane_state);
 }

-static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
+static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
 {
- struct intel_fbc *fbc = &dev_priv->fbc;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);

- return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
- fbc->compressed_fb.size * fbc->limit;
+ /* The use of a CPU fence is one of two ways to detect writes by the
+ * CPU to the scanout and trigger updates to the FBC.
+ *
+ * The other method is by software tracking (see
+ * intel_fbc_invalidate/flush()), which will manually notify FBC and nuke
+ * the current compressed buffer and recompress it.
+ *
+ * Note that it is possible for a tiled surface to be unmappable (and
+ * so have no fence associated with it) due to aperture constraints
+ * at the time of pinning.
+ *
+ * FIXME with 90/270 degree rotation we should use the fence on
+ * the normal GTT view (the rotated view doesn't even have a
+ * fence). Would need changes to the FBC fence Y offset as well.
+ * For now this will effectively disable FBC with 90/270 degree
+ * rotation.
+ */ + return DISPLAY_VER(i915) >= 9 || + (plane_state->flags & PLANE_HAS_FENCE && + plane_state->ggtt_vma->fence); } -static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) +static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state) { - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct intel_fbc *fbc = plane->fbc; - if ((DISPLAY_VER(dev_priv) == 9) && - cache->fb.modifier != I915_FORMAT_MOD_X_TILED) - return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->limit) * 8; - else - return 0; + return intel_fbc_min_limit(plane_state) <= fbc->limit && + intel_fbc_cfb_size(plane_state) <= fbc->compressed_fb.size * fbc->limit; } -static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv) +static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state) { - struct intel_fbc *fbc = &dev_priv->fbc; - - return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv); + return !plane_state->no_fbc_reason && + intel_fbc_is_fence_ok(plane_state) && + intel_fbc_is_cfb_ok(plane_state); } -static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) +static int intel_fbc_check_plane(struct intel_atomic_state *state, + struct intel_plane *plane) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc); + const struct intel_crtc_state *crtc_state; + struct intel_fbc *fbc = plane->fbc; - if (intel_vgpu_active(dev_priv)) { - fbc->no_fbc_reason = "VGPU is active"; - return false; - } + if (!fbc) + return 0; - if (!dev_priv->params.enable_fbc) { - fbc->no_fbc_reason = "disabled per module param or by default"; - return false; + if (intel_vgpu_active(i915)) { + plane_state->no_fbc_reason = "VGPU active"; + return 0; } - if (fbc->underrun_detected) { - fbc->no_fbc_reason = "underrun detected"; - return false; + if (!i915->params.enable_fbc) { + plane_state->no_fbc_reason = "disabled per module param or by default"; + return 0; } - return true; -} - -static bool intel_fbc_can_activate(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; - - if (!intel_fbc_can_enable(dev_priv)) - return false; - - if (!cache->plane.visible) { - fbc->no_fbc_reason = "primary plane not visible"; - return false; + if (!plane_state->uapi.visible) { + plane_state->no_fbc_reason = "plane not visible"; + return 0; } - /* We don't need to use a state cache here since this information is - * global for all CRTC. 
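intel_fbc_is_cfb_ok() above leans on a simple invariant: a CFB reserved at compression limit N can serve any framebuffer whose CFB size is at most N times the reservation, as long as the format's minimum limit (2 for 16bpp, per intel_fbc_min_limit()) is still honoured. A worked check of that arithmetic with invented sizes:

#include <stdbool.h>
#include <stdio.h>

struct fbc {
	unsigned long long reserved;	/* bytes actually reserved in stolen */
	int limit;			/* compression limit used at alloc time */
};

static bool cfb_ok(const struct fbc *fbc, unsigned long long cfb_size,
		   int min_limit)
{
	return min_limit <= fbc->limit &&
	       cfb_size <= fbc->reserved * fbc->limit;
}

int main(void)
{
	struct fbc fbc = { .reserved = 4ULL << 20, .limit = 2 };

	/* 6 MiB CFB fits: 6 MiB <= 4 MiB * 2 */
	printf("%d\n", cfb_ok(&fbc, 6ULL << 20, 1));
	/* 16bpp needs min_limit 2, still fine since the alloc used limit 2 */
	printf("%d\n", cfb_ok(&fbc, 6ULL << 20, 2));
	/* 10 MiB CFB does not fit */
	printf("%d\n", cfb_ok(&fbc, 10ULL << 20, 1));
	return 0;
}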
- */
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
- }
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

- if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
- fbc->no_fbc_reason = "incompatible mode";
- return false;
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ plane_state->no_fbc_reason = "interlaced mode not supported";
+ return 0;
 }

- if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
- fbc->no_fbc_reason = "mode too large for compression";
- return false;
+ if (crtc_state->double_wide) {
+ plane_state->no_fbc_reason = "double wide pipe not supported";
+ return 0;
 }

- /* The use of a CPU fence is one of two ways to detect writes by the
- * CPU to the scanout and trigger updates to the FBC.
- *
- * The other method is by software tracking (see
- * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
- * the current compressed buffer and recompress it.
- *
- * Note that is possible for a tiled surface to be unmappable (and
- * so have no fence associated with it) due to aperture constraints
- * at the time of pinning.
- *
- * FIXME with 90/270 degree rotation we should use the fence on
- * the normal GTT view (the rotated view doesn't even have a
- * fence). Would need changes to the FBC fence Y offset as well.
- * For now this will effectively disable FBC with 90/270 degree
- * rotation.
- */
- if (DISPLAY_VER(dev_priv) < 9 && cache->fence_id < 0) {
- fbc->no_fbc_reason = "framebuffer not tiled or fenced";
- return false;
- }
+ /*
+ * Display 12+ does not support FBC with PSR2.
+ * Recommendation is to keep this combination disabled
+ * Bspec: 50422 HSD: 14010260002
+ */
+ if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
+ plane_state->no_fbc_reason = "PSR2 enabled";
+ return 0;
+ }

- if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
- fbc->no_fbc_reason = "pixel format is invalid";
- return false;
- }
-
- if (!rotation_is_valid(dev_priv, cache->fb.format->format,
- cache->plane.rotation)) {
- fbc->no_fbc_reason = "rotation unsupported";
- return false;
+ if (!pixel_format_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "pixel format not supported";
+ return 0;
 }

- if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
- fbc->no_fbc_reason = "tiling unsupported";
- return false;
+ if (!tiling_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "tiling not supported";
+ return 0;
 }

- if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
- fbc->no_fbc_reason = "framebuffer stride not supported";
- return false;
+ if (!rotation_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "rotation not supported";
+ return 0;
 }

- if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
- cache->fb.format->has_alpha) {
- fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
- return false;
+ if (!stride_is_valid(plane_state)) {
+ plane_state->no_fbc_reason = "stride not supported";
+ return 0;
 }

- /* WaFbcExceedCdClockThreshold:hsw,bdw */
- if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
- cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
- fbc->no_fbc_reason = "pixel rate is too big";
- return false;
+ if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+ fb->format->has_alpha) {
+ plane_state->no_fbc_reason = "per-pixel alpha not supported";
+ return 0;
 }

- /* It is possible for the required CFB size change without a
- * crtc->disable + crtc->enable since it is possible to change the
- * stride without
triggering a full modeset. Since we try to
- * over-allocate the CFB, there's a chance we may keep FBC enabled even
- * if this happens, but if we exceed the current CFB size we'll have to
- * disable FBC. Notice that it would be possible to disable FBC, wait
- * for a frame, free the stolen node, then try to reenable FBC in case
- * we didn't get any invalidate/deactivate calls, but this would require
- * a lot of tracking just for a specific case. If we conclude it's an
- * important case, we can implement it later. */
- if (intel_fbc_cfb_size_changed(dev_priv)) {
- fbc->no_fbc_reason = "CFB requirements changed";
- return false;
+ if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ plane_state->no_fbc_reason = "plane size too big";
+ return 0;
 }

 /*
@@ -898,407 +1107,318 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 * having a Y offset that isn't divisible by 4 causes FIFO underrun
 * and screen flicker.
 */
- if (DISPLAY_VER(dev_priv) >= 9 &&
- (fbc->state_cache.plane.adjusted_y & 3)) {
- fbc->no_fbc_reason = "plane Y offset is misaligned";
- return false;
+ if (DISPLAY_VER(i915) >= 9 &&
+ plane_state->view.color_plane[0].y & 3) {
+ plane_state->no_fbc_reason = "plane start Y offset misaligned";
+ return 0;
 }

 /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (DISPLAY_VER(dev_priv) >= 11 &&
- (cache->plane.src_h + cache->plane.adjusted_y) % 4) {
- fbc->no_fbc_reason = "plane height + offset is non-modulo of 4";
- return false;
+ if (DISPLAY_VER(i915) >= 11 &&
+ (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+ plane_state->no_fbc_reason = "plane end Y offset misaligned";
+ return 0;
 }

- /*
- * Display 12+ is not supporting FBC with PSR2.
- * Recommendation is to keep this combination disabled
- * Bspec: 50422 HSD: 14010260002
- */
- if (fbc->state_cache.psr2_active && DISPLAY_VER(dev_priv) >= 12) {
- fbc->no_fbc_reason = "not supported with PSR2";
- return false;
- }
-
- return true;
-}
-
-static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
- struct intel_fbc_reg_params *params)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_state_cache *cache = &fbc->state_cache;
-
- /* Since all our fields are integer types, use memset here so the
- * comparison function can rely on memcmp because the padding will be
- * zero.
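The flip "nuke" fast path introduced just below keeps FBC enabled across a page flip only when every input to the hardware state is identical between the old and new plane state. Reduced to plain C, with illustrative stand-in fields for what the cfb stride/size helpers compute; the real check also bails out on modesets and on intel_fbc_is_ok() failures, which this sketch omits:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for intel_fbc_cfb_stride()/intel_fbc_cfb_size() results */
struct fbc_params {
	unsigned int format;
	unsigned long long modifier;
	unsigned int plane_stride;
	unsigned int cfb_stride;
	unsigned int cfb_size;
	unsigned int override_cfb_stride;
};

static bool can_flip_nuke(const struct fbc_params *old,
			  const struct fbc_params *new)
{
	return old->format == new->format &&
	       old->modifier == new->modifier &&
	       old->plane_stride == new->plane_stride &&
	       old->cfb_stride == new->cfb_stride &&
	       old->cfb_size == new->cfb_size &&
	       old->override_cfb_stride == new->override_cfb_stride;
}

int main(void)
{
	struct fbc_params a = {
		.format = 1, .modifier = 2, .plane_stride = 4096,
		.cfb_stride = 2048, .cfb_size = 1 << 20,
	};
	struct fbc_params b = a;

	printf("same params:   nuke ok = %d\n", can_flip_nuke(&a, &b));
	b.cfb_size <<= 1;	/* CFB size changed: deactivate instead */
	printf("cfb size grew: nuke ok = %d\n", can_flip_nuke(&a, &b));
	return 0;
}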
*/ - memset(params, 0, sizeof(*params)); - - params->fence_id = cache->fence_id; - params->fence_y_offset = cache->fence_y_offset; - - params->interval = cache->interval; - - params->crtc.pipe = crtc->pipe; - params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; + /* WaFbcExceedCdClockThreshold:hsw,bdw */ + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { + const struct intel_cdclk_state *cdclk_state; - params->fb.format = cache->fb.format; - params->fb.modifier = cache->fb.modifier; - params->fb.stride = cache->fb.stride; + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); - params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) { + plane_state->no_fbc_reason = "pixel rate too high"; + return 0; + } + } - params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride; + plane_state->no_fbc_reason = NULL; - params->plane_visible = cache->plane.visible; + return 0; } -static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) + +static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_fbc *fbc = &dev_priv->fbc; - const struct intel_fbc_state_cache *cache = &fbc->state_cache; - const struct intel_fbc_reg_params *params = &fbc->params; + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state = + intel_atomic_get_old_plane_state(state, plane); + const struct intel_plane_state *new_plane_state = + intel_atomic_get_new_plane_state(state, plane); + const struct drm_framebuffer *old_fb = old_plane_state->hw.fb; + const struct drm_framebuffer *new_fb = new_plane_state->hw.fb; - if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) + if (drm_atomic_crtc_needs_modeset(&new_crtc_state->uapi)) return false; - if (!params->plane_visible) + if (!intel_fbc_is_ok(old_plane_state) || + !intel_fbc_is_ok(new_plane_state)) return false; - if (!intel_fbc_can_activate(crtc)) + if (old_fb->format->format != new_fb->format->format) return false; - if (params->fb.format != cache->fb.format) + if (old_fb->modifier != new_fb->modifier) return false; - if (params->fb.modifier != cache->fb.modifier) + if (intel_fbc_plane_stride(old_plane_state) != + intel_fbc_plane_stride(new_plane_state)) return false; - if (params->fb.stride != cache->fb.stride) + if (intel_fbc_cfb_stride(old_plane_state) != + intel_fbc_cfb_stride(new_plane_state)) return false; - if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache)) + if (intel_fbc_cfb_size(old_plane_state) != + intel_fbc_cfb_size(new_plane_state)) return false; - if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride) + if (intel_fbc_override_cfb_stride(old_plane_state) != + intel_fbc_override_cfb_stride(new_plane_state)) return false; return true; } -bool intel_fbc_pre_update(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static bool __intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *plane_state = - 
intel_atomic_get_new_plane_state(state, plane); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - const char *reason = "update pending"; + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_fbc *fbc = plane->fbc; bool need_vblank_wait = false; - if (!plane->has_fbc || !plane_state) - return need_vblank_wait; + fbc->flip_pending = true; - mutex_lock(&fbc->lock); + if (intel_fbc_can_flip_nuke(state, crtc, plane)) + return need_vblank_wait; - if (fbc->crtc != crtc) - goto unlock; + intel_fbc_deactivate(fbc, "update pending"); - intel_fbc_update_state_cache(crtc, crtc_state, plane_state); - fbc->flip_pending = true; - - if (!intel_fbc_can_flip_nuke(crtc_state)) { - intel_fbc_deactivate(dev_priv, reason); - - /* - * Display WA #1198: glk+ - * Need an extra vblank wait between FBC disable and most plane - * updates. Bspec says this is only needed for plane disable, but - * that is not true. Touching most plane registers will cause the - * corruption to appear. Also SKL/derivatives do not seem to be - * affected. - * - * TODO: could optimize this a bit by sampling the frame - * counter when we disable FBC (if it was already done earlier) - * and skipping the extra vblank wait before the plane update - * if at least one frame has already passed. - */ - if (fbc->activated && - DISPLAY_VER(dev_priv) >= 10) - need_vblank_wait = true; - fbc->activated = false; - } -unlock: - mutex_unlock(&fbc->lock); + /* + * Display WA #1198: glk+ + * Need an extra vblank wait between FBC disable and most plane + * updates. Bspec says this is only needed for plane disable, but + * that is not true. Touching most plane registers will cause the + * corruption to appear. Also SKL/derivatives do not seem to be + * affected. + * + * TODO: could optimize this a bit by sampling the frame + * counter when we disable FBC (if it was already done earlier) + * and skipping the extra vblank wait before the plane update + * if at least one frame has already passed. + */ + if (fbc->activated && DISPLAY_VER(i915) >= 10) + need_vblank_wait = true; + fbc->activated = false; return need_vblank_wait; } -/** - * __intel_fbc_disable - disable FBC - * @dev_priv: i915 device instance - * - * This is the low level function that actually disables FBC. Callers should - * grab the FBC lock. 
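Note the new shape of intel_fbc_pre_update()/intel_fbc_post_update() below: instead of assuming the primary plane, they walk every plane in the commit and only touch FBC instances owned by a plane on the right pipe, under that instance's own lock. A minimal stand-alone model of that walk, with hypothetical types and a pthread mutex standing in for fbc->lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct plane;

struct fbc {
	pthread_mutex_t lock;
	const struct plane *owner;	/* plays the role of fbc->state.plane */
};

struct plane {
	struct fbc *fbc;	/* NULL when the plane cannot do FBC */
	int pipe;
	const char *name;
};

static bool pre_update(struct plane **planes, int n, int commit_pipe)
{
	bool need_vblank_wait = false;
	int i;

	for (i = 0; i < n; i++) {
		struct plane *plane = planes[i];
		struct fbc *fbc = plane->fbc;

		if (!fbc || plane->pipe != commit_pipe)
			continue;

		pthread_mutex_lock(&fbc->lock);
		if (fbc->owner == plane) {
			printf("deactivating FBC for %s\n", plane->name);
			need_vblank_wait = true;	/* stands in for WA #1198 */
		}
		pthread_mutex_unlock(&fbc->lock);
	}

	return need_vblank_wait;
}

int main(void)
{
	static struct fbc fbc = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct plane primary = { .fbc = &fbc, .pipe = 0, .name = "plane 1A" };
	struct plane *planes[] = { &primary };

	fbc.owner = &primary;
	printf("need vblank wait: %d\n", pre_update(planes, 1, 0));
	return 0;
}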
- */ -static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_crtc *crtc = fbc->crtc; + const struct intel_plane_state *plane_state; + bool need_vblank_wait = false; + struct intel_plane *plane; + int i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); - drm_WARN_ON(&dev_priv->drm, !fbc->crtc); - drm_WARN_ON(&dev_priv->drm, fbc->active); + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); - drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", - pipe_name(crtc->pipe)); + if (fbc->state.plane == plane) + need_vblank_wait |= __intel_fbc_pre_update(state, crtc, plane); - __intel_fbc_cleanup_cfb(dev_priv); + mutex_unlock(&fbc->lock); + } - fbc->crtc = NULL; + return need_vblank_wait; } -static void __intel_fbc_post_update(struct intel_crtc *crtc) +static void __intel_fbc_disable(struct intel_fbc *fbc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; + struct intel_plane *plane = fbc->state.plane; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, fbc->active); - if (fbc->crtc != crtc) - return; - - fbc->flip_pending = false; + drm_dbg_kms(&i915->drm, "Disabling FBC on [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); - if (!dev_priv->params.enable_fbc) { - intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); - __intel_fbc_disable(dev_priv); + __intel_fbc_cleanup_cfb(fbc); - return; - } + fbc->state.plane = NULL; +} - intel_fbc_get_reg_params(crtc, &fbc->params); +static void __intel_fbc_post_update(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; - if (!intel_fbc_can_activate(crtc)) - return; + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); if (!fbc->busy_bits) - intel_fbc_hw_activate(dev_priv); + intel_fbc_activate(fbc); else - intel_fbc_deactivate(dev_priv, "frontbuffer write"); + intel_fbc_deactivate(fbc, "frontbuffer write"); } void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_plane_state *plane_state = - intel_atomic_get_new_plane_state(state, plane); - struct intel_fbc *fbc = &dev_priv->fbc; + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; - if (!plane->has_fbc || !plane_state) - return; + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; - mutex_lock(&fbc->lock); - __intel_fbc_post_update(crtc); - mutex_unlock(&fbc->lock); + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + + if (fbc->state.plane == plane) { + fbc->flip_pending = false; + __intel_fbc_post_update(fbc); + } + + mutex_unlock(&fbc->lock); + } } static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) { - if (fbc->crtc) - return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + if (fbc->state.plane) + return fbc->state.plane->frontbuffer_bit; else return fbc->possible_framebuffer_bits; } -void intel_fbc_invalidate(struct drm_i915_private 
*dev_priv, +void intel_fbc_invalidate(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; - if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) + if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) return; mutex_lock(&fbc->lock); fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; - if (fbc->crtc && fbc->busy_bits) - intel_fbc_deactivate(dev_priv, "frontbuffer write"); + if (fbc->state.plane && fbc->busy_bits) + intel_fbc_deactivate(fbc, "frontbuffer write"); mutex_unlock(&fbc->lock); } -void intel_fbc_flush(struct drm_i915_private *dev_priv, +void intel_fbc_flush(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &dev_priv->fbc; - - if (!HAS_FBC(dev_priv)) - return; + struct intel_fbc *fbc = i915->fbc; - /* - * GTT tracking does not nuke the entire cfb - * so don't clear busy_bits set for some other - * reason. - */ - if (origin == ORIGIN_GTT) + if (!fbc) return; mutex_lock(&fbc->lock); fbc->busy_bits &= ~frontbuffer_bits; - if (origin == ORIGIN_FLIP) + if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) goto out; - if (!fbc->busy_bits && fbc->crtc && + if (!fbc->busy_bits && fbc->state.plane && (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) - intel_fbc_recompress(dev_priv); + intel_fbc_nuke(fbc); else if (!fbc->flip_pending) - __intel_fbc_post_update(fbc->crtc); + __intel_fbc_post_update(fbc); } out: mutex_unlock(&fbc->lock); } -/** - * intel_fbc_choose_crtc - select a CRTC to enable FBC on - * @dev_priv: i915 device instance - * @state: the atomic state structure - * - * This function looks at the proposed state for CRTCs and planes, then chooses - * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to - * true. - * - * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe - * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. - */ -void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, - struct intel_atomic_state *state) +int intel_fbc_atomic_check(struct intel_atomic_state *state) { - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_plane *plane; struct intel_plane_state *plane_state; - bool crtc_chosen = false; + struct intel_plane *plane; int i; - mutex_lock(&fbc->lock); - - /* Does this atomic commit involve the CRTC currently tied to FBC? */ - if (fbc->crtc && - !intel_atomic_get_new_crtc_state(state, fbc->crtc)) - goto out; - - if (!intel_fbc_can_enable(dev_priv)) - goto out; - - /* Simply choose the first CRTC that is compatible and has a visible - * plane. We could go for fancier schemes such as checking the plane - * size, but this would just affect the few platforms that don't tie FBC - * to pipe or plane A. 
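intel_fbc_invalidate()/intel_fbc_flush() above are pure bit bookkeeping: CPU writes latch busy bits, masked down to the frontbuffer bit FBC actually cares about, and a flush triggers recompression only once those bits clear. A compact model of that protocol with invented bit values:

#include <stdio.h>

#define PLANE_A_BIT 0x1
#define PLANE_B_BIT 0x2

static unsigned int busy_bits;
static unsigned int fbc_bit = PLANE_A_BIT;	/* bit of the plane FBC owns */

static void invalidate(unsigned int frontbuffer_bits)
{
	busy_bits |= fbc_bit & frontbuffer_bits;
	if (busy_bits)
		printf("deactivate: frontbuffer write\n");
}

static void flush(unsigned int frontbuffer_bits)
{
	busy_bits &= ~frontbuffer_bits;
	if (!busy_bits && (frontbuffer_bits & fbc_bit))
		printf("nuke: recompress\n");
}

int main(void)
{
	invalidate(PLANE_A_BIT | PLANE_B_BIT);
	flush(PLANE_B_BIT);	/* unrelated plane: FBC stays idle */
	flush(PLANE_A_BIT);	/* our plane: triggers recompression */
	return 0;
}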
*/ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); - - if (!plane->has_fbc) - continue; - - if (!plane_state->uapi.visible) - continue; - - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + int ret; - crtc_state->enable_fbc = true; - crtc_chosen = true; - break; + ret = intel_fbc_check_plane(state, plane); + if (ret) + return ret; } - if (!crtc_chosen) - fbc->no_fbc_reason = "no suitable CRTC for FBC"; - -out: - mutex_unlock(&fbc->lock); + return 0; } -/** - * intel_fbc_enable: tries to enable FBC on the CRTC - * @crtc: the CRTC - * @state: corresponding &drm_crtc_state for @crtc - * - * This function checks if the given CRTC was chosen for FBC, then enables it if - * possible. Notice that it doesn't activate FBC. It is valid to call - * intel_fbc_enable multiple times for the same pipe without an - * intel_fbc_disable in the middle, as long as it is deactivated. - */ -void intel_fbc_enable(struct intel_atomic_state *state, - struct intel_crtc *crtc) +static void __intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_plane *plane) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; - - if (!plane->has_fbc || !plane_state) - return; + struct intel_fbc *fbc = plane->fbc; - mutex_lock(&fbc->lock); + if (fbc->state.plane) { + if (fbc->state.plane != plane) + return; - if (fbc->crtc) { - if (fbc->crtc != crtc || - (!intel_fbc_cfb_size_changed(dev_priv) && - !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv))) - goto out; + if (intel_fbc_is_ok(plane_state)) + return; - __intel_fbc_disable(dev_priv); + __intel_fbc_disable(fbc); } - drm_WARN_ON(&dev_priv->drm, fbc->active); + drm_WARN_ON(&i915->drm, fbc->active); - intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->no_fbc_reason = plane_state->no_fbc_reason; + if (fbc->no_fbc_reason) + return; - /* FIXME crtc_state->enable_fbc lies :( */ - if (!cache->plane.visible) - goto out; + if (!intel_fbc_is_fence_ok(plane_state)) { + fbc->no_fbc_reason = "framebuffer not fenced"; + return; + } - if (intel_fbc_alloc_cfb(dev_priv, - intel_fbc_calculate_cfb_size(dev_priv, cache), - plane_state->hw.fb->format->cpp[0])) { - cache->plane.visible = false; - fbc->no_fbc_reason = "not enough stolen memory"; - goto out; + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "FIFO underrun"; + return; } - cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv); + if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(plane_state), + intel_fbc_min_limit(plane_state))) { + fbc->no_fbc_reason = "not enough stolen memory"; + return; + } - drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(&i915->drm, "Enabling FBC on [PLANE:%d:%s]\n", + plane->base.base.id, plane->base.name); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; - fbc->crtc = crtc; + intel_fbc_update_state(state, crtc, plane); - intel_fbc_program_cfb(dev_priv); -out: - mutex_unlock(&fbc->lock); + intel_fbc_program_cfb(fbc); } /** @@ -1309,92 
+1429,122 @@ out: */ void intel_fbc_disable(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_plane *plane; - if (!plane->has_fbc) - return; + for_each_intel_plane(&i915->drm, plane) { + struct intel_fbc *fbc = plane->fbc; - mutex_lock(&fbc->lock); - if (fbc->crtc == crtc) - __intel_fbc_disable(dev_priv); - mutex_unlock(&fbc->lock); + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + if (fbc->state.plane == plane) + __intel_fbc_disable(fbc); + mutex_unlock(&fbc->lock); + } +} + +void intel_fbc_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state; + struct intel_plane *plane; + int i; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + struct intel_fbc *fbc = plane->fbc; + + if (!fbc || plane->pipe != crtc->pipe) + continue; + + mutex_lock(&fbc->lock); + + if (crtc_state->update_pipe && plane_state->no_fbc_reason) { + if (fbc->state.plane == plane) + __intel_fbc_disable(fbc); + } else { + __intel_fbc_enable(state, crtc, plane); + } + + mutex_unlock(&fbc->lock); + } } /** * intel_fbc_global_disable - globally disable FBC - * @dev_priv: i915 device instance + * @i915: i915 device instance * * This function disables FBC regardless of which CRTC is associated with it. */ -void intel_fbc_global_disable(struct drm_i915_private *dev_priv) +void intel_fbc_global_disable(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; mutex_lock(&fbc->lock); - if (fbc->crtc) { - drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); - __intel_fbc_disable(dev_priv); - } + if (fbc->state.plane) + __intel_fbc_disable(fbc); mutex_unlock(&fbc->lock); } static void intel_fbc_underrun_work_fn(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, fbc.underrun_work); - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = container_of(work, typeof(*fbc), underrun_work); + struct drm_i915_private *i915 = fbc->i915; mutex_lock(&fbc->lock); /* Maybe we were scheduled twice. */ - if (fbc->underrun_detected || !fbc->crtc) + if (fbc->underrun_detected || !fbc->state.plane) goto out; - drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); + drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n"); fbc->underrun_detected = true; - intel_fbc_deactivate(dev_priv, "FIFO underrun"); + intel_fbc_deactivate(fbc, "FIFO underrun"); + if (!fbc->flip_pending) + intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(i915, fbc->state.plane->pipe)); + __intel_fbc_disable(fbc); out: mutex_unlock(&fbc->lock); } /* * intel_fbc_reset_underrun - reset FBC fifo underrun status. - * @dev_priv: i915 device instance + * @i915: the i915 device * * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we * want to re-enable FBC after an underrun to increase test coverage. 
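The underrun path above splits its work across contexts: the IRQ handler cannot take fbc->lock, so it only schedules the worker; the worker latches underrun_detected and disables FBC; the debugfs reset hook clears the latch so IGT can re-test. The same control flow in plain C, with the work queue simulated by a direct call:

#include <stdbool.h>
#include <stdio.h>

static bool underrun_detected;
static bool fbc_enabled = true;

static void underrun_work_fn(void)
{
	/* Maybe we were scheduled twice. */
	if (underrun_detected || !fbc_enabled)
		return;

	underrun_detected = true;
	fbc_enabled = false;
	printf("Disabling FBC due to FIFO underrun.\n");
}

static void fifo_underrun_irq(void)
{
	/* real code: schedule_work(&fbc->underrun_work) from IRQ context */
	underrun_work_fn();
}

static void reset_underrun(void)
{
	if (underrun_detected)
		printf("Re-allowing FBC after fifo underrun\n");
	underrun_detected = false;
	fbc_enabled = true;
}

int main(void)
{
	fifo_underrun_irq();
	fifo_underrun_irq();	/* second underrun: already latched, no-op */
	reset_underrun();
	return 0;
}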
*/ -int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) +void intel_fbc_reset_underrun(struct drm_i915_private *i915) { - int ret; + struct intel_fbc *fbc = i915->fbc; + + if (!fbc) + return; - cancel_work_sync(&dev_priv->fbc.underrun_work); + cancel_work_sync(&fbc->underrun_work); - ret = mutex_lock_interruptible(&dev_priv->fbc.lock); - if (ret) - return ret; + mutex_lock(&fbc->lock); - if (dev_priv->fbc.underrun_detected) { - drm_dbg_kms(&dev_priv->drm, + if (fbc->underrun_detected) { + drm_dbg_kms(&i915->drm, "Re-allowing FBC after fifo underrun\n"); - dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; + fbc->no_fbc_reason = "FIFO underrun cleared"; } - dev_priv->fbc.underrun_detected = false; - mutex_unlock(&dev_priv->fbc.lock); - - return 0; + fbc->underrun_detected = false; + mutex_unlock(&fbc->lock); } /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun - * @dev_priv: i915 device instance + * @i915: i915 device * * Without FBC, most underruns are harmless and don't really cause too many * problems, except for an annoying message on dmesg. With FBC, underruns can @@ -1406,11 +1556,11 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) * * This function is called from the IRQ handler. */ -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!fbc) return; /* There's no guarantee that underrun_detected won't be set to true @@ -1434,26 +1584,26 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) * space to change the value during runtime without sanitizing it again. IGT * relies on being able to change i915.enable_fbc at runtime. 
*/ -static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +static int intel_sanitize_fbc_option(struct drm_i915_private *i915) { - if (dev_priv->params.enable_fbc >= 0) - return !!dev_priv->params.enable_fbc; + if (i915->params.enable_fbc >= 0) + return !!i915->params.enable_fbc; - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(i915)) return 0; - if (IS_BROADWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 9) + if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9) return 1; return 0; } -static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +static bool need_fbc_vtd_wa(struct drm_i915_private *i915) { /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ - if (intel_vtd_active() && - (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { - drm_info(&dev_priv->drm, + if (intel_vtd_active(i915) && + (IS_SKYLAKE(i915) || IS_BROXTON(i915))) { + drm_info(&i915->drm, "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); return true; } @@ -1461,38 +1611,171 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) return false; } +void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) +{ + if (!fbc) + return; + + plane->fbc = fbc; + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; +} + +static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc; + + fbc = kzalloc(sizeof(*fbc), GFP_KERNEL); + if (!fbc) + return NULL; + + fbc->i915 = i915; + INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); + mutex_init(&fbc->lock); + + if (DISPLAY_VER(i915) >= 7) + fbc->funcs = &ivb_fbc_funcs; + else if (DISPLAY_VER(i915) == 6) + fbc->funcs = &snb_fbc_funcs; + else if (DISPLAY_VER(i915) == 5) + fbc->funcs = &ilk_fbc_funcs; + else if (IS_G4X(i915)) + fbc->funcs = &g4x_fbc_funcs; + else if (DISPLAY_VER(i915) == 4) + fbc->funcs = &i965_fbc_funcs; + else + fbc->funcs = &i8xx_fbc_funcs; + + return fbc; +} + /** * intel_fbc_init - Initialize FBC - * @dev_priv: the i915 device + * @i915: the i915 device * * This function might be called during PM init process. */ -void intel_fbc_init(struct drm_i915_private *dev_priv) +void intel_fbc_init(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc; - INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); - mutex_init(&fbc->lock); - fbc->active = false; + if (!drm_mm_initialized(&i915->mm.stolen)) + mkwrite_device_info(i915)->display.has_fbc = false; - if (!drm_mm_initialized(&dev_priv->mm.stolen)) - mkwrite_device_info(dev_priv)->display.has_fbc = false; + if (need_fbc_vtd_wa(i915)) + mkwrite_device_info(i915)->display.has_fbc = false; - if (need_fbc_vtd_wa(dev_priv)) - mkwrite_device_info(dev_priv)->display.has_fbc = false; + i915->params.enable_fbc = intel_sanitize_fbc_option(i915); + drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", + i915->params.enable_fbc); - dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", - dev_priv->params.enable_fbc); + if (!HAS_FBC(i915)) + return; - if (!HAS_FBC(dev_priv)) { - fbc->no_fbc_reason = "unsupported by this chipset"; + fbc = intel_fbc_create(i915); + if (!fbc) return; - } /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software * matches the hardware state. 
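intel_sanitize_fbc_option() above is a small truth table: an explicit module parameter wins, otherwise FBC defaults on only for Broadwell and display version 9+. Restated as a pure function with a few asserts (a sketch, not the driver's signature):

#include <assert.h>
#include <stdbool.h>

static int sanitize_enable_fbc(int param, bool has_fbc, bool is_bdw, int ver)
{
	if (param >= 0)
		return !!param;	/* user's choice, normalized to 0/1 */
	if (!has_fbc)
		return 0;
	if (is_bdw || ver >= 9)
		return 1;
	return 0;
}

int main(void)
{
	assert(sanitize_enable_fbc(-1, true, false, 9) == 1);	/* auto: on */
	assert(sanitize_enable_fbc(-1, true, false, 7) == 0);	/* auto: off */
	assert(sanitize_enable_fbc(2, false, false, 0) == 1);	/* forced on */
	return 0;
}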
*/ - if (intel_fbc_hw_is_active(dev_priv)) - intel_fbc_hw_deactivate(dev_priv); + if (intel_fbc_hw_is_active(fbc)) + intel_fbc_hw_deactivate(fbc); + + i915->fbc = fbc; +} + +static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) +{ + struct intel_fbc *fbc = m->private; + struct drm_i915_private *i915 = fbc->i915; + struct intel_plane *plane; + intel_wakeref_t wakeref; + + drm_modeset_lock_all(&i915->drm); + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + mutex_lock(&fbc->lock); + + if (fbc->active) { + seq_puts(m, "FBC enabled\n"); + seq_printf(m, "Compressing: %s\n", + yesno(intel_fbc_is_compressing(fbc))); + } else { + seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); + } + + for_each_intel_plane(&i915->drm, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + if (plane->fbc != fbc) + continue; + + seq_printf(m, "%c [PLANE:%d:%s]: %s\n", + fbc->state.plane == plane ? '*' : ' ', + plane->base.base.id, plane->base.name, + plane_state->no_fbc_reason ?: "FBC possible"); + } + + mutex_unlock(&fbc->lock); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + drm_modeset_unlock_all(&i915->drm); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(intel_fbc_debugfs_status); + +static int intel_fbc_debugfs_false_color_get(void *data, u64 *val) +{ + struct intel_fbc *fbc = data; + + *val = fbc->false_color; + + return 0; +} + +static int intel_fbc_debugfs_false_color_set(void *data, u64 val) +{ + struct intel_fbc *fbc = data; + + mutex_lock(&fbc->lock); + + fbc->false_color = val; + + if (fbc->active) + fbc->funcs->set_false_color(fbc, fbc->false_color); + + mutex_unlock(&fbc->lock); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops, + intel_fbc_debugfs_false_color_get, + intel_fbc_debugfs_false_color_set, + "%llu\n"); + +static void intel_fbc_debugfs_add(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_fbc_status", 0444, + minor->debugfs_root, fbc, + &intel_fbc_debugfs_status_fops); + + if (fbc->funcs->set_false_color) + debugfs_create_file("i915_fbc_false_color", 0644, + minor->debugfs_root, fbc, + &intel_fbc_debugfs_false_color_fops); +} + +void intel_fbc_debugfs_register(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc = i915->fbc; + + if (fbc) + intel_fbc_debugfs_add(fbc); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index 6dc1edefe81b..07ad0411fcc3 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -8,23 +8,23 @@ #include <linux/types.h> -#include "intel_frontbuffer.h" - +enum fb_op_origin; struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_fbc; +struct intel_plane; struct intel_plane_state; -void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, - struct intel_atomic_state *state); -bool intel_fbc_is_active(struct drm_i915_private *dev_priv); +int intel_fbc_atomic_check(struct intel_atomic_state *state); bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); -void intel_fbc_enable(struct intel_atomic_state *state, +void intel_fbc_cleanup(struct drm_i915_private *dev_priv); +void intel_fbc_update(struct intel_atomic_state *state, struct 
intel_crtc *crtc); void intel_fbc_disable(struct intel_crtc *crtc); void intel_fbc_global_disable(struct drm_i915_private *dev_priv); @@ -33,8 +33,9 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, enum fb_op_origin origin); void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); -void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv); -int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv); +void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane); +void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915); +void intel_fbc_reset_underrun(struct drm_i915_private *i915); +void intel_fbc_debugfs_register(struct drm_i915_private *i915); #endif /* __INTEL_FBC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index df05d285f0bd..adc3a81be9f7 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -45,6 +45,8 @@ #include "i915_drv.h" #include "intel_display_types.h" +#include "intel_fb.h" +#include "intel_fb_pin.h" #include "intel_fbdev.h" #include "intel_frontbuffer.h" @@ -229,8 +231,6 @@ static int intelfb_create(struct drm_fb_helper *helper, goto out_unlock; } - intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB); - info = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(info)) { drm_err(&dev_priv->drm, "Failed to allocate fb_info\n"); diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index e10b9cd8e86e..3d6e22923601 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -2,12 +2,113 @@ /* * Copyright © 2020 Intel Corporation */ + #include "intel_atomic.h" +#include "intel_crtc.h" #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" +static void assert_fdi_tx(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + bool cur_state; + + if (HAS_DDI(dev_priv)) { + /* + * DDI does not have a specific FDI_TX register. + * + * FDI is never fed from EDP transcoder + * so pipe->transcoder cast is fine here. 
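Each assert_fdi_* helper in this hunk follows one pattern: read the control register, reduce it to a bool, and warn when it disagrees with the expected state. A userspace-flavoured sketch of that pattern with a faked register read; the enable-bit value below is illustrative, not taken from i915_reg.h:

#include <stdbool.h>
#include <stdio.h>

#define FAKE_FDI_RX_ENABLE (1u << 31)	/* assumed bit, for illustration */

static unsigned int fake_fdi_rx_ctl = FAKE_FDI_RX_ENABLE;

static void assert_fdi_rx(bool state)
{
	bool cur_state = fake_fdi_rx_ctl & FAKE_FDI_RX_ENABLE;

	if (cur_state != state)
		fprintf(stderr,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			state ? "on" : "off", cur_state ? "on" : "off");
}

int main(void)
{
	assert_fdi_rx(true);	/* matches: silent */
	assert_fdi_rx(false);	/* mismatch: warns */
	return 0;
}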
+ */ + enum transcoder cpu_transcoder = (enum transcoder)pipe; + cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE; + } else { + cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE; + } + I915_STATE_WARN(cur_state != state, + "FDI TX state assertion failure (expected %s, current %s)\n", + onoff(state), onoff(cur_state)); +} + +void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_fdi_tx(i915, pipe, true); +} + +void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_fdi_tx(i915, pipe, false); +} + +static void assert_fdi_rx(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + bool cur_state; + + cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE; + I915_STATE_WARN(cur_state != state, + "FDI RX state assertion failure (expected %s, current %s)\n", + onoff(state), onoff(cur_state)); +} + +void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_fdi_rx(i915, pipe, true); +} + +void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_fdi_rx(i915, pipe, false); +} + +void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, + enum pipe pipe) +{ + bool cur_state; + + /* ILK FDI PLL is always enabled */ + if (IS_IRONLAKE(i915)) + return; + + /* On Haswell, DDI ports are responsible for the FDI PLL setup */ + if (HAS_DDI(i915)) + return; + + cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE; + I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n"); +} + +static void assert_fdi_rx_pll(struct drm_i915_private *i915, + enum pipe pipe, bool state) +{ + bool cur_state; + + cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE; + I915_STATE_WARN(cur_state != state, + "FDI RX PLL assertion failure (expected %s, current %s)\n", + onoff(state), onoff(cur_state)); +} + +void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_fdi_rx_pll(i915, pipe, true); +} + +void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe) +{ + assert_fdi_rx_pll(i915, pipe, false); +} + +void intel_fdi_link_train(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state); +} + /* units of 100MHz */ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) { @@ -57,7 +158,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, if (pipe_config->fdi_lanes <= 2) return 0; - other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); + other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -78,7 +179,7 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return -EINVAL; } - other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); + other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -91,8 +192,34 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } return 0; default: - BUG(); + MISSING_CASE(pipe); + return 0; + } +} + +void intel_fdi_pll_freq_update(struct drm_i915_private *i915) +{ + if (IS_IRONLAKE(i915)) { + u32 fdi_pll_clk = + intel_de_read(i915, FDI_PLL_BIOS_0) & 
FDI_PLL_FB_CLOCK_MASK; + + i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; + } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) { + i915->fdi_pll_freq = 270000; + } else { + return; } + + drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq); +} + +int intel_fdi_link_freq(struct drm_i915_private *i915, + const struct intel_crtc_state *pipe_config) +{ + if (HAS_DDI(i915)) + return pipe_config->port_clock; /* SPLL */ + else + return i915->fdi_pll_freq; } int ilk_fdi_compute_config(struct intel_crtc *crtc, @@ -140,11 +267,60 @@ retry: } if (needs_recompute) - return I915_DISPLAY_CONFIG_RETRY; + return -EAGAIN; return ret; } +static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) +{ + u32 temp; + + temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); + if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) + return; + + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & + FDI_RX_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & + FDI_RX_ENABLE); + + temp &= ~FDI_BC_BIFURCATION_SELECT; + if (enable) + temp |= FDI_BC_BIFURCATION_SELECT; + + drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", + enable ? "en" : "dis"); + intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); + intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); +} + +static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + switch (crtc->pipe) { + case PIPE_A: + break; + case PIPE_B: + if (crtc_state->fdi_lanes > 2) + cpt_set_fdi_bc_bifurcation(dev_priv, false); + else + cpt_set_fdi_bc_bifurcation(dev_priv, true); + + break; + case PIPE_C: + cpt_set_fdi_bc_bifurcation(dev_priv, true); + + break; + default: + MISSING_CASE(crtc->pipe); + } +} + void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -196,8 +372,15 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc, i915_reg_t reg; u32 temp, tries; + /* + * Write the TU size bits before fdi link training, so that error + * detection works. + */ + intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), + intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); + /* FDI needs bits from pipe first */ - assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder); + assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder); /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ @@ -299,6 +482,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, i915_reg_t reg; u32 temp, i, retry; + /* + * Write the TU size bits before fdi link training, so that error + * detection works. + */ + intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), + intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); @@ -436,6 +626,15 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, i915_reg_t reg; u32 temp, i, j; + ivb_update_fdi_bc_bifurcation(crtc_state); + + /* + * Write the TU size bits before fdi link training, so that error + * detection works. 
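The ILK branch of intel_fdi_pll_freq_update() above computes the PLL frequency as (feedback clock field + 2) * 10000, in kHz. A quick arithmetic check with an assumed raw field value of 25, which lands exactly on the 270000 kHz that the SNB/IVB branch hardcodes:

#include <stdio.h>

/* FDI_PLL_FB_CLOCK_MASK extraction elided; fb_clk is the raw field value */
static unsigned int ilk_fdi_pll_freq_khz(unsigned int fb_clk)
{
	return (fb_clk + 2) * 10000;
}

int main(void)
{
	/* 25 is an invented readout: (25 + 2) * 10000 = 270000 kHz */
	printf("FDI PLL freq=%u\n", ilk_fdi_pll_freq_khz(25));
	return 0;
}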
+ */ + intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), + intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); @@ -688,6 +887,43 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, DP_TP_CTL_ENABLE); } +void hsw_fdi_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val; + + /* + * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) + * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, + * step 13 is the correct place for it. Step 18 is where it was + * originally before the BUN. + */ + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_ENABLE; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + + val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); + val &= ~DDI_BUF_CTL_ENABLE; + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val); + + intel_wait_ddi_buf_idle(dev_priv, PORT_E); + + intel_ddi_disable_clock(encoder); + + val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); + val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); + + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_PCDCLK; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_PLL_ENABLE; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); +} + void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -807,15 +1043,27 @@ void ilk_fdi_disable(struct intel_crtc *crtc) udelay(100); } +static const struct intel_fdi_funcs ilk_funcs = { + .fdi_link_train = ilk_fdi_link_train, +}; + +static const struct intel_fdi_funcs gen6_funcs = { + .fdi_link_train = gen6_fdi_link_train, +}; + +static const struct intel_fdi_funcs ivb_funcs = { + .fdi_link_train = ivb_manual_fdi_link_train, +}; + void intel_fdi_init_hook(struct drm_i915_private *dev_priv) { if (IS_IRONLAKE(dev_priv)) { - dev_priv->display.fdi_link_train = ilk_fdi_link_train; + dev_priv->fdi_funcs = &ilk_funcs; } else if (IS_SANDYBRIDGE(dev_priv)) { - dev_priv->display.fdi_link_train = gen6_fdi_link_train; + dev_priv->fdi_funcs = &gen6_funcs; } else if (IS_IVYBRIDGE(dev_priv)) { /* FIXME: detect B0+ stepping and use auto training */ - dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; + dev_priv->fdi_funcs = &ivb_funcs; } } diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h index af01d2c173a8..1cdb86172702 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.h +++ b/drivers/gpu/drm/i915/display/intel_fdi.h @@ -6,12 +6,14 @@ #ifndef _INTEL_FDI_H_ #define _INTEL_FDI_H_ +enum pipe; struct drm_i915_private; struct intel_crtc; struct intel_crtc_state; struct intel_encoder; -#define I915_DISPLAY_CONFIG_RETRY 1 +int intel_fdi_link_freq(struct drm_i915_private *i915, + const struct intel_crtc_state *pipe_config); int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, struct intel_crtc_state *pipe_config); void intel_fdi_normal_train(struct intel_crtc *crtc); @@ -21,5 +23,18 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state); void intel_fdi_init_hook(struct drm_i915_private *dev_priv); void hsw_fdi_link_train(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void hsw_fdi_disable(struct 
intel_encoder *encoder); +void intel_fdi_pll_freq_update(struct drm_i915_private *i915); + +void intel_fdi_link_train(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state); + +void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe); +void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe); +void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe); +void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe); +void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe); +void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe); +void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe); #endif diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index eb841960840d..d636d21fa9ce 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -26,8 +26,8 @@ */ #include "i915_drv.h" -#include "i915_trace.h" #include "intel_de.h" +#include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_fifo_underrun.h" @@ -61,7 +61,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->cpu_fifo_underrun_disabled) return false; @@ -79,7 +79,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) lockdep_assert_held(&dev_priv->irq_lock); for_each_pipe(dev_priv, pipe) { - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = intel_crtc_for_pipe(dev_priv, pipe); if (crtc->pch_fifo_underrun_disabled) return false; @@ -279,7 +279,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); bool old; lockdep_assert_held(&dev_priv->irq_lock); @@ -348,7 +348,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, bool enable) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, pch_transcoder); + intel_crtc_for_pipe(dev_priv, pch_transcoder); unsigned long flags; bool old; @@ -391,7 +391,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); u32 underruns = 0; /* We may be called too early in init, thanks BIOS! */ diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 8e75debcce1a..791248f812aa 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -55,11 +55,11 @@ * cancelled as soon as busyness is detected. 
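 * The invalidate/flush events fan out to the features that need to
 * observe frontbuffer activity: PSR, DRRS and FBC (see
 * frontbuffer_flush() below).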
*/ -#include "display/intel_dp.h" - #include "i915_drv.h" -#include "i915_trace.h" +#include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_drrs.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" #include "intel_psr.h" @@ -91,7 +91,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915, trace_intel_frontbuffer_flush(frontbuffer_bits, origin); might_sleep(); - intel_edp_drrs_flush(i915, frontbuffer_bits); + intel_drrs_flush(i915, frontbuffer_bits); intel_psr_flush(i915, frontbuffer_bits, origin); intel_fbc_flush(i915, frontbuffer_bits, origin); } @@ -180,7 +180,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front, might_sleep(); intel_psr_invalidate(i915, frontbuffer_bits, origin); - intel_edp_drrs_invalidate(i915, frontbuffer_bits); + intel_drrs_invalidate(i915, frontbuffer_bits); intel_fbc_invalidate(i915, frontbuffer_bits, origin); } diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h index 6d41f5394425..ff0c37b079aa 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h @@ -28,16 +28,16 @@ #include <linux/kref.h> #include "gem/i915_gem_object_types.h" -#include "i915_active.h" +#include "i915_active_types.h" struct drm_i915_private; enum fb_op_origin { - ORIGIN_GTT, - ORIGIN_CPU, + ORIGIN_CPU = 0, ORIGIN_CS, ORIGIN_FLIP, ORIGIN_DIRTYFB, + ORIGIN_CURSOR_UPDATE, }; struct intel_frontbuffer { diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index ceb1bf8a8c3c..3b8b84177085 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -334,6 +334,15 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin) algo->data = bus; } +static bool has_gmbus_irq(struct drm_i915_private *i915) +{ + /* + * encoder->shutdown() may want to use GMBUS + * after irqs have already been disabled. + */ + return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); +} + static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) { DEFINE_WAIT(wait); @@ -344,7 +353,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ - if (!HAS_GMBUS_IRQ(dev_priv)) + if (!has_gmbus_irq(dev_priv)) irq_en = 0; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); @@ -375,7 +384,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) /* Important: The hw handles only the first bit, so set only one! 
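 * In this function that single bit is GMBUS_IDLE_EN.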
*/ irq_enable = 0; - if (HAS_GMBUS_IRQ(dev_priv)) + if (has_gmbus_irq(dev_priv)) irq_enable = GMBUS_IDLE_EN; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index ebc2e32aec0b..4509fe7438e8 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -17,12 +17,12 @@ #include "i915_drv.h" #include "i915_reg.h" -#include "intel_display_power.h" +#include "intel_connector.h" #include "intel_de.h" +#include "intel_display_power.h" #include "intel_display_types.h" #include "intel_hdcp.h" -#include "intel_sideband.h" -#include "intel_connector.h" +#include "intel_pcode.h" #define KEY_LOAD_TRIES 5 #define HDCP2_LC_RETRY_CNT 3 @@ -33,21 +33,6 @@ static int intel_conn_to_vcpi(struct intel_connector *connector) return connector->port ? connector->port->vcpi.vcpi : 0; } -static bool -intel_streams_type1_capable(struct intel_connector *connector) -{ - const struct intel_hdcp_shim *shim = connector->hdcp.shim; - bool capable = false; - - if (!shim) - return capable; - - if (shim->streams_type1_capable) - shim->streams_type1_capable(connector, &capable); - - return capable; -} - /* * intel_hdcp_required_content_stream selects the most highest common possible HDCP * content_type for all streams in DP MST topology because security f/w doesn't @@ -86,7 +71,7 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) if (conn_dig_port != dig_port) continue; - if (!enforce_type0 && !intel_streams_type1_capable(connector)) + if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable) enforce_type0 = true; data->streams[data->k].stream_id = intel_conn_to_vcpi(connector); @@ -112,6 +97,25 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) return 0; } +static int intel_hdcp_prepare_streams(struct intel_connector *connector) +{ + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); + struct hdcp_port_data *data = &dig_port->hdcp_port_data; + struct intel_hdcp *hdcp = &connector->hdcp; + int ret; + + if (!intel_encoder_is_mst(intel_attached_encoder(connector))) { + data->k = 1; + data->streams[0].stream_type = hdcp->content_type; + } else { + ret = intel_hdcp_required_content_stream(dig_port); + if (ret) + return ret; + } + + return 0; +} + static bool intel_hdcp_is_ksv_valid(u8 *ksv) { @@ -1632,6 +1636,14 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) return -EINVAL; } + /* + * MST topology is not Type 1 capable if it contains a downstream + * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant. 
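+	 * Both cases are reported in byte 1 of the repeater's RxInfo,
+	 * which is decoded just below.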
+ */ + dig_port->hdcp_mst_type1_capable = + !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) && + !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]); + /* Converting and Storing the seq_num_v to local variable as DWORD */ seq_num_v = drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); @@ -1876,6 +1888,14 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { ret = hdcp2_authenticate_sink(connector); if (!ret) { + ret = intel_hdcp_prepare_streams(connector); + if (ret) { + drm_dbg_kms(&i915->drm, + "Prepare streams failed.(%d)\n", + ret); + break; + } + ret = hdcp2_propagate_stream_management_info(connector); if (ret) { drm_dbg_kms(&i915->drm, @@ -1921,9 +1941,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) static int _intel_hdcp2_enable(struct intel_connector *connector) { - struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct hdcp_port_data *data = &dig_port->hdcp_port_data; struct intel_hdcp *hdcp = &connector->hdcp; int ret; @@ -1931,16 +1949,6 @@ static int _intel_hdcp2_enable(struct intel_connector *connector) connector->base.name, connector->base.base.id, hdcp->content_type); - /* Stream which requires encryption */ - if (!intel_encoder_is_mst(intel_attached_encoder(connector))) { - data->k = 1; - data->streams[0].stream_type = hdcp->content_type; - } else { - ret = intel_hdcp_required_content_stream(dig_port); - if (ret) - return ret; - } - ret = hdcp2_authenticate_and_encrypt(connector); if (ret) { drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index b04685bb6439..3b5b9e7b05b7 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -53,21 +53,20 @@ #include "intel_panel.h" #include "intel_snps_phy.h" -static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi) +static struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi) { - return hdmi_to_dig_port(intel_hdmi)->base.base.dev; + return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev); } static void assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { - struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi); u32 enabled_bits; enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; - drm_WARN(dev, + drm_WARN(&dev_priv->drm, intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits, "HDMI port enabled, expecting disabled\n"); } @@ -1246,13 +1245,14 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) { - struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); - struct i2c_adapter *adapter = - intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); + struct i2c_adapter *adapter; if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) return; + adapter = intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n", enable ? 
"Enabling" : "Disabling"); @@ -1703,7 +1703,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port, drm_dbg_kms(&i915->drm, "msg_sz(%zd) is more than exp size(%zu)\n", ret, size); - return -1; + return -EINVAL; } offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET; @@ -1800,6 +1800,11 @@ static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi, READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } +static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420; +} + static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_downstream_limits, bool has_hdmi_sink) @@ -1830,7 +1835,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, int clock, bool respect_downstream_limits, bool has_hdmi_sink) { - struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); + struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); if (clock < 25000) return MODE_CLOCK_LOW; @@ -1864,8 +1869,12 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, return MODE_OK; } -static int intel_hdmi_port_clock(int clock, int bpc) +static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) { + /* YCBCR420 TMDS rate requirement is half the pixel clock */ + if (ycbcr420_output) + clock /= 2; + /* * Need to adjust the port link by: * 1.5x for 12bpc @@ -1874,18 +1883,29 @@ static int intel_hdmi_port_clock(int clock, int bpc) return clock * bpc / 8; } -static bool intel_hdmi_bpc_possible(struct drm_connector *connector, - int bpc, bool has_hdmi_sink, bool ycbcr420_output) +static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) +{ + switch (bpc) { + case 12: + return !HAS_GMCH(i915); + case 10: + return DISPLAY_VER(i915) >= 11; + case 8: + return true; + default: + MISSING_CASE(bpc); + return false; + } +} + +static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, + int bpc, bool has_hdmi_sink, bool ycbcr420_output) { - struct drm_i915_private *i915 = to_i915(connector->dev); const struct drm_display_info *info = &connector->display_info; const struct drm_hdmi_info *hdmi = &info->hdmi; switch (bpc) { case 12: - if (HAS_GMCH(i915)) - return false; - if (!has_hdmi_sink) return false; @@ -1894,9 +1914,6 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector, else return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36; case 10: - if (DISPLAY_VER(i915) < 11) - return false; - if (!has_hdmi_sink) return false; @@ -1916,26 +1933,26 @@ static enum drm_mode_status intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, bool has_hdmi_sink, bool ycbcr420_output) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); enum drm_mode_status status; - if (ycbcr420_output) - clock /= 2; - /* check if we can do 8bpc */ - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8), + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output), true, has_hdmi_sink); /* if we can't do 8bpc we may still be able to do 12bpc */ if (status != MODE_OK && - intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12), + intel_hdmi_source_bpc_possible(i915, 12) && + intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, 
ycbcr420_output), true, has_hdmi_sink); /* if we can't do 8,12bpc we may still be able to do 10bpc */ if (status != MODE_OK && - intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10), + intel_hdmi_source_bpc_possible(i915, 10) && + intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output)) + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output), true, has_hdmi_sink); return status; @@ -1946,8 +1963,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); - struct drm_device *dev = intel_hdmi_to_dev(hdmi); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi); enum drm_mode_status status; int clock = mode->clock; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -2001,7 +2017,7 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, if (connector_state->crtc != crtc_state->uapi.crtc) continue; - if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) + if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) return false; } @@ -2016,6 +2032,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + if (!intel_hdmi_source_bpc_possible(dev_priv, bpc)) + return false; + /* * HDMI deep color affects the clocks, so it's only possible * when not cloning with other encoder types. @@ -2024,7 +2043,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return false; /* Display Wa_1405510057:icl,ehl */ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + if (intel_hdmi_is_ycbcr420(crtc_state) && bpc == 10 && DISPLAY_VER(dev_priv) == 11 && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) @@ -2032,8 +2051,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return intel_hdmi_deep_color_possible(crtc_state, bpc, crtc_state->has_hdmi_sink, - crtc_state->output_format == - INTEL_OUTPUT_FORMAT_YCBCR420); + intel_hdmi_is_ycbcr420(crtc_state)); } static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, @@ -2041,12 +2059,13 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, int clock) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state); int bpc; for (bpc = 12; bpc >= 10; bpc -= 2) { if (hdmi_deep_color_possible(crtc_state, bpc) && hdmi_port_clock_valid(intel_hdmi, - intel_hdmi_port_clock(clock, bpc), + intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output), true, crtc_state->has_hdmi_sink) == MODE_OK) return bpc; } @@ -2066,13 +2085,10 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; - /* YCBCR420 TMDS rate requirement is half the pixel clock */ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - clock /= 2; - bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock); - crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc); + crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc, + intel_hdmi_is_ycbcr420(crtc_state)); /* * pipe_bpp could already be below 8bpc due to @@ -2142,34 +2158,44 @@ static bool 
intel_hdmi_has_audio(struct intel_encoder *encoder, return intel_conn_state->force_audio == HDMI_AUDIO_ON; } +static enum intel_output_format +intel_hdmi_output_format(struct intel_connector *connector, + bool ycbcr_420_output) +{ + if (connector->base.ycbcr_420_allowed && ycbcr_420_output) + return INTEL_OUTPUT_FORMAT_YCBCR420; + else + return INTEL_OUTPUT_FORMAT_RGB; +} + static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_connector *connector = conn_state->connector; - struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + const struct drm_display_info *info = &connector->base.display_info; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); int ret; - bool ycbcr_420_only; - ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, adjusted_mode); - if (connector->ycbcr_420_allowed && ycbcr_420_only) { - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - } else { - if (!connector->ycbcr_420_allowed && ycbcr_420_only) - drm_dbg_kms(&i915->drm, - "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); + crtc_state->output_format = intel_hdmi_output_format(connector, ycbcr_420_only); + + if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) { + drm_dbg_kms(&i915->drm, + "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; } ret = intel_hdmi_compute_clock(encoder, crtc_state); if (ret) { - if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 && - connector->ycbcr_420_allowed && - drm_mode_is_420_also(&connector->display_info, adjusted_mode)) { - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - ret = intel_hdmi_compute_clock(encoder, crtc_state); - } + if (intel_hdmi_is_ycbcr420(crtc_state) || + !connector->base.ycbcr_420_allowed || + !drm_mode_is_420_also(info, adjusted_mode)) + return ret; + + crtc_state->output_format = intel_hdmi_output_format(connector, true); + ret = intel_hdmi_compute_clock(encoder, crtc_state); } return ret; @@ -2209,8 +2235,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (ret) return ret; - if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { - ret = intel_pch_panel_fitting(pipe_config, conn_state); + if (intel_hdmi_is_ycbcr420(pipe_config)) { + ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; } @@ -2260,6 +2286,17 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return 0; } +void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + /* + * Give a hand to buggy BIOSen which forget to turn + * the TMDS output buffers back on after a reboot. 
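+	 * This runs on the encoder shutdown path; forcing TMDS back on
+	 * here leaves the dual mode adaptor usable for whatever takes
+	 * over the display next.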
+ */ + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); +} + static void intel_hdmi_unset_edid(struct drm_connector *connector) { diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index b43a180d007e..2bf440eb400a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -28,6 +28,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port, int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); +void intel_hdmi_encoder_shutdown(struct intel_encoder *encoder); bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, struct drm_connector *connector, bool high_tmds_clock_ratio, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 47c85ac97c87..955f6d07b0e1 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -215,8 +215,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) static void intel_hpd_irq_setup(struct drm_i915_private *i915) { - if (i915->display_irqs_enabled && i915->display.hpd_irq_setup) - i915->display.hpd_irq_setup(i915); + if (i915->display_irqs_enabled && i915->hotplug_funcs) + i915->hotplug_funcs->hpd_irq_setup(i915); } static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 7f3c638c8950..4970bf146c4a 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -74,7 +74,7 @@ #include "intel_de.h" #include "intel_lpe_audio.h" -#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL) +#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL) static struct platform_device * lpe_audio_platdev_create(struct drm_i915_private *dev_priv) @@ -96,7 +96,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) return ERR_PTR(-ENOMEM); } - rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq; + rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq; rsc[0].flags = IORESOURCE_IRQ; rsc[0].name = "hdmi-lpe-audio-irq"; @@ -148,7 +148,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) * than us fiddle with its internals. 
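 * (The LPE IRQ descriptor is not covered by this; the caller frees
 * it separately via irq_free_desc(), as in the teardown path below.)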
*/ - platform_device_unregister(dev_priv->lpe_audio.platdev); + platform_device_unregister(dev_priv->audio.lpe.platdev); } static void lpe_audio_irq_unmask(struct irq_data *d) @@ -167,7 +167,7 @@ static struct irq_chip lpe_audio_irqchip = { static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) { - int irq = dev_priv->lpe_audio.irq; + int irq = dev_priv->audio.lpe.irq; drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); irq_set_chip_and_handler_name(irq, @@ -204,15 +204,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) { int ret; - dev_priv->lpe_audio.irq = irq_alloc_desc(0); - if (dev_priv->lpe_audio.irq < 0) { + dev_priv->audio.lpe.irq = irq_alloc_desc(0); + if (dev_priv->audio.lpe.irq < 0) { drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", - dev_priv->lpe_audio.irq); - ret = dev_priv->lpe_audio.irq; + dev_priv->audio.lpe.irq); + ret = dev_priv->audio.lpe.irq; goto err; } - drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq); + drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq); ret = lpe_audio_irq_init(dev_priv); @@ -223,10 +223,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) goto err_free_irq; } - dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv); + dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); - if (IS_ERR(dev_priv->lpe_audio.platdev)) { - ret = PTR_ERR(dev_priv->lpe_audio.platdev); + if (IS_ERR(dev_priv->audio.lpe.platdev)) { + ret = PTR_ERR(dev_priv->audio.lpe.platdev); drm_err(&dev_priv->drm, "Failed to create lpe audio platform device: %d\n", ret); @@ -241,10 +241,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) return 0; err_free_irq: - irq_free_desc(dev_priv->lpe_audio.irq); + irq_free_desc(dev_priv->audio.lpe.irq); err: - dev_priv->lpe_audio.irq = -1; - dev_priv->lpe_audio.platdev = NULL; + dev_priv->audio.lpe.irq = -1; + dev_priv->audio.lpe.platdev = NULL; return ret; } @@ -262,7 +262,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) if (!HAS_LPE_AUDIO(dev_priv)) return; - ret = generic_handle_irq(dev_priv->lpe_audio.irq); + ret = generic_handle_irq(dev_priv->audio.lpe.irq); if (ret) drm_err_ratelimited(&dev_priv->drm, "error handling LPE audio irq: %d\n", ret); @@ -303,10 +303,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); - irq_free_desc(dev_priv->lpe_audio.irq); + irq_free_desc(dev_priv->audio.lpe.irq); - dev_priv->lpe_audio.irq = -1; - dev_priv->lpe_audio.platdev = NULL; + dev_priv->audio.lpe.irq = -1; + dev_priv->audio.lpe.platdev = NULL; } /** @@ -333,7 +333,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, if (!HAS_LPE_AUDIO(dev_priv)) return; - pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev); + pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev); ppdata = &pdata->port[port - PORT_B]; spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); @@ -361,7 +361,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, } if (pdata->notify_audio_lpe) - pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B); + pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B); spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags); } diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index e0381b0fce91..9fced37bed70 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -40,9 +40,12 @@ #include 
"i915_drv.h" #include "intel_atomic.h" +#include "intel_backlight.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dpll.h" +#include "intel_fdi.h" #include "intel_gmbus.h" #include "intel_lvds.h" #include "intel_panel.h" @@ -323,7 +326,7 @@ static void intel_enable_lvds(struct intel_atomic_state *state, drm_err(&dev_priv->drm, "timed out waiting for panel to power on\n"); - intel_panel_enable_backlight(pipe_config, conn_state); + intel_backlight_enable(pipe_config, conn_state); } static void intel_disable_lvds(struct intel_atomic_state *state, @@ -351,7 +354,7 @@ static void gmch_disable_lvds(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { - intel_panel_disable_backlight(old_conn_state); + intel_backlight_disable(old_conn_state); intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state); } @@ -361,7 +364,7 @@ static void pch_disable_lvds(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_panel_disable_backlight(old_conn_state); + intel_backlight_disable(old_conn_state); } static void pch_post_disable_lvds(struct intel_atomic_state *state, @@ -388,13 +391,15 @@ intel_lvds_mode_valid(struct drm_connector *connector, struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; + enum drm_mode_status status; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - if (mode->hdisplay > fixed_mode->hdisplay) - return MODE_PANEL; - if (mode->vdisplay > fixed_mode->vdisplay) - return MODE_PANEL; + + status = intel_panel_mode_valid(intel_connector, mode); + if (status != MODE_OK) + return status; + if (fixed_mode->clock > max_pixclk) return MODE_CLOCK_HIGH; @@ -441,8 +446,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ - intel_fixed_panel_mode(intel_connector->panel.fixed_mode, - adjusted_mode); + ret = intel_panel_compute_config(intel_connector, adjusted_mode); + if (ret) + return ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -450,10 +456,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, if (HAS_PCH_SPLIT(dev_priv)) pipe_config->has_pch_encoder = true; - if (HAS_GMCH(dev_priv)) - ret = intel_gmch_panel_fitting(pipe_config, conn_state); - else - ret = intel_pch_panel_fitting(pipe_config, conn_state); + ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; @@ -906,7 +909,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) } intel_encoder->get_hw_state = intel_lvds_get_hw_state; intel_encoder->get_config = intel_lvds_get_config; - intel_encoder->update_pipe = intel_panel_update_backlight; + intel_encoder->update_pipe = intel_backlight_update; intel_encoder->shutdown = intel_lvds_shutdown; intel_connector->get_hw_state = intel_connector_get_hw_state; @@ -999,7 +1002,7 @@ out: mutex_unlock(&dev->mode_config.mutex); intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); - intel_panel_setup_backlight(connector, INVALID_PIPE); + intel_backlight_setup(intel_connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n", diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 3855fba70980..0065111593a6 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -30,10 +30,9 @@ #include <linux/firmware.h> #include <acpi/video.h> -#include "display/intel_panel.h" - #include "i915_drv.h" #include "intel_acpi.h" +#include "intel_backlight.h" #include "intel_display_types.h" #include "intel_opregion.h" @@ -450,7 +449,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) bclp); drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) - intel_panel_set_backlight_acpi(connector->base.state, bclp, 255); + intel_backlight_set_acpi(connector->base.state, bclp, 255); drm_connector_list_iter_end(&conn_iter); asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 7d7a60b4d2de..a0c8e43db5eb 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -28,26 +28,51 @@ * Chris Wilson <chris@chris-wilson.co.uk> */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include <linux/kernel.h> -#include <linux/moduleparam.h> #include <linux/pwm.h> +#include "intel_backlight.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_types.h" -#include "intel_dp_aux_backlight.h" -#include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" -void -intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, - struct drm_display_mode *adjusted_mode) +bool intel_panel_use_ssc(struct drm_i915_private *i915) +{ + if (i915->params.panel_use_ssc >= 0) + return i915->params.panel_use_ssc != 0; + return i915->vbt.lvds_use_ssc + && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE); +} + +int intel_panel_compute_config(struct intel_connector *connector, + struct drm_display_mode *adjusted_mode) { + const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode; + + if (!fixed_mode) + return 
0; + + /* + * We don't want to lie too much to the user about the refresh + * rate they're going to get. But we have to allow a bit of latitude + * for Xorg since it likes to automagically cook up modes with slightly + * off refresh rates. + */ + if (abs(drm_mode_vrefresh(adjusted_mode) - drm_mode_vrefresh(fixed_mode)) > 1) { + drm_dbg_kms(connector->base.dev, + "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n", + connector->base.base.id, connector->base.name, + drm_mode_vrefresh(adjusted_mode), drm_mode_vrefresh(fixed_mode)); + + return -EINVAL; + } + drm_mode_copy(adjusted_mode, fixed_mode); drm_mode_set_crtcinfo(adjusted_mode, 0); + + return 0; } static bool is_downclock_mode(const struct drm_display_mode *downclock_mode, @@ -175,8 +200,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector) } /* adjusted_mode has been preset to be the panel's fixed mode */ -int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static int pch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -381,8 +406,8 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state, } } -int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static int gmch_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -456,1783 +481,55 @@ out: return 0; } -/** - * scale - scale values from one range to another - * @source_val: value in range [@source_min..@source_max] - * @source_min: minimum legal value for @source_val - * @source_max: maximum legal value for @source_val - * @target_min: corresponding target value for @source_min - * @target_max: corresponding target value for @source_max - * - * Return @source_val in range [@source_min..@source_max] scaled to range - * [@target_min..@target_max]. - */ -static u32 scale(u32 source_val, - u32 source_min, u32 source_max, - u32 target_min, u32 target_max) -{ - u64 target_val; - - WARN_ON(source_min > source_max); - WARN_ON(target_min > target_max); - - /* defensive */ - source_val = clamp(source_val, source_min, source_max); - - /* avoid overflows */ - target_val = mul_u32_u32(source_val - source_min, - target_max - target_min); - target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); - target_val += target_min; - - return target_val; -} - -/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result - * to [hw_min..hw_max]. */ -static u32 clamp_user_to_hw(struct intel_connector *connector, - u32 user_level, u32 user_max) -{ - struct intel_panel *panel = &connector->panel; - u32 hw_level; - - hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max); - hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max); - - return hw_level; -} - -/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. 
*/ -static u32 scale_hw_to_user(struct intel_connector *connector, - u32 hw_level, u32 user_max) -{ - struct intel_panel *panel = &connector->panel; - - return scale(hw_level, panel->backlight.min, panel->backlight.max, - 0, user_max); -} - -u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0); - - if (dev_priv->params.invert_brightness < 0) - return val; - - if (dev_priv->params.invert_brightness > 0 || - dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { - return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; - } - - return val; -} - -void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val); - panel->backlight.pwm_funcs->set(conn_state, val); -} - -u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - drm_WARN_ON_ONCE(&dev_priv->drm, - panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); - - val = scale(val, panel->backlight.min, panel->backlight.max, - panel->backlight.pwm_level_min, panel->backlight.pwm_level_max); - - return intel_panel_invert_pwm_level(connector, val); -} - -u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - drm_WARN_ON_ONCE(&dev_priv->drm, - panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); - - if (dev_priv->params.invert_brightness > 0 || - (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)) - val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); - - return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max, - panel->backlight.min, panel->backlight.max); -} - -static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; -} - -static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; -} - -static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 val; - - val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; - if (DISPLAY_VER(dev_priv) < 4) - val >>= 1; - - if (panel->backlight.combination_mode) { - u8 lbpc; - - pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc); - val *= lbpc; - } - - return val; -} - -static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != 
PIPE_B)) - return 0; - - return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; -} - -static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - return intel_de_read(dev_priv, - BXT_BLC_PWM_DUTY(panel->backlight.controller)); -} - -static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct intel_panel *panel = &connector->panel; - struct pwm_state state; - - pwm_get_state(panel->backlight.pwm, &state); - return pwm_get_relative_duty_cycle(&state, 100); -} - -static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; - intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level); -} - -static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - u32 tmp; - - tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; - intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level); -} - -static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 tmp, mask; - - drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0); - - if (panel->backlight.combination_mode) { - u8 lbpc; - - lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1; - level /= lbpc; - pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc); - } - - if (DISPLAY_VER(dev_priv) == 4) { - mask = BACKLIGHT_DUTY_CYCLE_MASK; - } else { - level <<= 1; - mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV; - } - - tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask; - intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level); -} - -static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; - u32 tmp; - - tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; - intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level); -} - -static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - intel_de_write(dev_priv, - BXT_BLC_PWM_DUTY(panel->backlight.controller), level); -} - -static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; - - pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); -} - -static void -intel_panel_actually_set_backlight(const struct 
drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level); - - panel->backlight.funcs->set(conn_state, level); -} - -/* set backlight brightness to level in range [0..max], assuming hw min is - * respected. - */ -void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state, - u32 user_level, u32 user_max) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 hw_level; - - /* - * Lack of crtc may occur during driver init because - * connection_mutex isn't held across the entire backlight - * setup + modeset readout, and the BIOS can issue the - * requests at any time. - */ - if (!panel->backlight.present || !conn_state->crtc) - return; - - mutex_lock(&dev_priv->backlight_lock); - - drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); - - hw_level = clamp_user_to_hw(connector, user_level, user_max); - panel->backlight.level = hw_level; - - if (panel->backlight.device) - panel->backlight.device->props.brightness = - scale_hw_to_user(connector, - panel->backlight.level, - panel->backlight.device->props.max_brightness); - - if (panel->backlight.enabled) - intel_panel_actually_set_backlight(conn_state, hw_level); - - mutex_unlock(&dev_priv->backlight_lock); -} - -static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - u32 tmp; - - intel_panel_set_pwm_level(old_conn_state, level); - - /* - * Although we don't support or enable CPU PWM with LPT/SPT based - * systems, it may have been enabled prior to loading the - * driver. Disable to avoid warnings on LCPLL disable. - * - * This needs rework if we need to add support for CPU PWM on PCH split - * platforms. 
- */ - tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); - if (tmp & BLM_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, - "cpu backlight was enabled, disabling\n"); - intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, - tmp & ~BLM_PWM_ENABLE); - } - - tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); -} - -static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) -{ - struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - u32 tmp; - - intel_panel_set_pwm_level(old_conn_state, val); - - tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); - intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); - - tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); -} - -static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) -{ - intel_panel_set_pwm_level(old_conn_state, val); -} - -static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) -{ - struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev); - u32 tmp; - - intel_panel_set_pwm_level(old_conn_state, val); - - tmp = intel_de_read(dev_priv, BLC_PWM_CTL2); - intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); -} - -static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) -{ - struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; - u32 tmp; - - intel_panel_set_pwm_level(old_conn_state, val); - - tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); - intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), - tmp & ~BLM_PWM_ENABLE); -} - -static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) -{ - struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 tmp; - - intel_panel_set_pwm_level(old_conn_state, val); - - tmp = intel_de_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); - - if (panel->backlight.controller == 1) { - val = intel_de_read(dev_priv, UTIL_PIN_CTL); - val &= ~UTIL_PIN_ENABLE; - intel_de_write(dev_priv, UTIL_PIN_CTL, val); - } -} - -static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) -{ - struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 tmp; - - intel_panel_set_pwm_level(old_conn_state, val); - - tmp = intel_de_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); -} - -static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct intel_panel *panel = &connector->panel; - - panel->backlight.pwm_state.enabled = false; - 
pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); -} - -void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state) -{ - struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - if (!panel->backlight.present) - return; - - /* - * Do not disable backlight on the vga_switcheroo path. When switching - * away from i915, the other client may depend on i915 to handle the - * backlight. This will leave the backlight on unnecessarily when - * another client is not activated. - */ - if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { - drm_dbg_kms(&dev_priv->drm, - "Skipping backlight disable on vga switch\n"); - return; - } - - mutex_lock(&dev_priv->backlight_lock); - - if (panel->backlight.device) - panel->backlight.device->props.power = FB_BLANK_POWERDOWN; - panel->backlight.enabled = false; - panel->backlight.funcs->disable(old_conn_state, 0); - - mutex_unlock(&dev_priv->backlight_lock); -} - -static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 pch_ctl1, pch_ctl2, schicken; - - pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); - if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n"); - pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); - } - - if (HAS_PCH_LPT(dev_priv)) { - schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2); - if (panel->backlight.alternate_pwm_increment) - schicken |= LPT_PWM_GRANULARITY; - else - schicken &= ~LPT_PWM_GRANULARITY; - intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken); - } else { - schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1); - if (panel->backlight.alternate_pwm_increment) - schicken |= SPT_PWM_GRANULARITY; - else - schicken &= ~SPT_PWM_GRANULARITY; - intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken); - } - - pch_ctl2 = panel->backlight.pwm_level_max << 16; - intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); - - pch_ctl1 = 0; - if (panel->backlight.active_low_pwm) - pch_ctl1 |= BLM_PCH_POLARITY; - - /* After LPT, override is the default. */ - if (HAS_PCH_LPT(dev_priv)) - pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; - - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); - intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1); - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, - pch_ctl1 | BLM_PCH_PWM_ENABLE); - - /* This won't stick until the above enable. 
*/ - intel_panel_set_pwm_level(conn_state, level); -} - -static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - u32 cpu_ctl2, pch_ctl1, pch_ctl2; - - cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); - if (cpu_ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n"); - cpu_ctl2 &= ~BLM_PWM_ENABLE; - intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2); - } - - pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); - if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n"); - pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); - } - - if (cpu_transcoder == TRANSCODER_EDP) - cpu_ctl2 = BLM_TRANSCODER_EDP; - else - cpu_ctl2 = BLM_PIPE(cpu_transcoder); - intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2); - intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2); - intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); - - /* This won't stick until the above enable. */ - intel_panel_set_pwm_level(conn_state, level); - - pch_ctl2 = panel->backlight.pwm_level_max << 16; - intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); - - pch_ctl1 = 0; - if (panel->backlight.active_low_pwm) - pch_ctl1 |= BLM_PCH_POLARITY; - - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); - intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1); - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, - pch_ctl1 | BLM_PCH_PWM_ENABLE); -} - -static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 ctl, freq; - - ctl = intel_de_read(dev_priv, BLC_PWM_CTL); - if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { - drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); - intel_de_write(dev_priv, BLC_PWM_CTL, 0); - } - - freq = panel->backlight.pwm_level_max; - if (panel->backlight.combination_mode) - freq /= 0xff; - - ctl = freq << 17; - if (panel->backlight.combination_mode) - ctl |= BLM_LEGACY_MODE; - if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm) - ctl |= BLM_POLARITY_PNV; - - intel_de_write(dev_priv, BLC_PWM_CTL, ctl); - intel_de_posting_read(dev_priv, BLC_PWM_CTL); - - /* XXX: combine this into above write? */ - intel_panel_set_pwm_level(conn_state, level); - - /* - * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is - * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 - * that has backlight. 
- */ - if (DISPLAY_VER(dev_priv) == 2) - intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); -} - -static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; - u32 ctl, ctl2, freq; - - ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2); - if (ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); - ctl2 &= ~BLM_PWM_ENABLE; - intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2); - } - - freq = panel->backlight.pwm_level_max; - if (panel->backlight.combination_mode) - freq /= 0xff; - - ctl = freq << 16; - intel_de_write(dev_priv, BLC_PWM_CTL, ctl); - - ctl2 = BLM_PIPE(pipe); - if (panel->backlight.combination_mode) - ctl2 |= BLM_COMBINATION_MODE; - if (panel->backlight.active_low_pwm) - ctl2 |= BLM_POLARITY_I965; - intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2); - intel_de_posting_read(dev_priv, BLC_PWM_CTL2); - intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); - - intel_panel_set_pwm_level(conn_state, level); -} - -static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - u32 ctl, ctl2; - - ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); - if (ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); - ctl2 &= ~BLM_PWM_ENABLE; - intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2); - } - - ctl = panel->backlight.pwm_level_max << 16; - intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl); - - /* XXX: combine this into above write? */ - intel_panel_set_pwm_level(conn_state, level); - - ctl2 = 0; - if (panel->backlight.active_low_pwm) - ctl2 |= BLM_POLARITY_I965; - intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2); - intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); - intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), - ctl2 | BLM_PWM_ENABLE); -} - -static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - u32 pwm_ctl, val; - - /* Controller 1 uses the utility pin. 
*/ - if (panel->backlight.controller == 1) { - val = intel_de_read(dev_priv, UTIL_PIN_CTL); - if (val & UTIL_PIN_ENABLE) { - drm_dbg_kms(&dev_priv->drm, - "util pin already enabled\n"); - val &= ~UTIL_PIN_ENABLE; - intel_de_write(dev_priv, UTIL_PIN_CTL, val); - } - - val = 0; - if (panel->backlight.util_pin_active_low) - val |= UTIL_PIN_POLARITY; - intel_de_write(dev_priv, UTIL_PIN_CTL, - val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE); - } - - pwm_ctl = intel_de_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - if (pwm_ctl & BXT_BLC_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); - pwm_ctl &= ~BXT_BLC_PWM_ENABLE; - intel_de_write(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl); - } - - intel_de_write(dev_priv, - BXT_BLC_PWM_FREQ(panel->backlight.controller), - panel->backlight.pwm_level_max); - - intel_panel_set_pwm_level(conn_state, level); - - pwm_ctl = 0; - if (panel->backlight.active_low_pwm) - pwm_ctl |= BXT_BLC_PWM_POLARITY; - - intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl); - intel_de_posting_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl | BXT_BLC_PWM_ENABLE); -} - -static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 pwm_ctl; - - pwm_ctl = intel_de_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - if (pwm_ctl & BXT_BLC_PWM_ENABLE) { - drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); - pwm_ctl &= ~BXT_BLC_PWM_ENABLE; - intel_de_write(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl); - } - - intel_de_write(dev_priv, - BXT_BLC_PWM_FREQ(panel->backlight.controller), - panel->backlight.pwm_level_max); - - intel_panel_set_pwm_level(conn_state, level); - - pwm_ctl = 0; - if (panel->backlight.active_low_pwm) - pwm_ctl |= BXT_BLC_PWM_POLARITY; - - intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl); - intel_de_posting_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl | BXT_BLC_PWM_ENABLE); -} - -static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_panel *panel = &connector->panel; - - pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); - panel->backlight.pwm_state.enabled = true; - pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); -} - -static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_panel *panel = &connector->panel; - - WARN_ON(panel->backlight.max == 0); - - if (panel->backlight.level <= panel->backlight.min) { - panel->backlight.level = panel->backlight.max; - if (panel->backlight.device) - panel->backlight.device->props.brightness = - scale_hw_to_user(connector, - panel->backlight.level, - 
panel->backlight.device->props.max_brightness); - } - - panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level); - panel->backlight.enabled = true; - if (panel->backlight.device) - panel->backlight.device->props.power = FB_BLANK_UNBLANK; -} - -void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - - if (!panel->backlight.present) - return; - - drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe)); - - mutex_lock(&dev_priv->backlight_lock); - - __intel_panel_enable_backlight(crtc_state, conn_state); - - mutex_unlock(&dev_priv->backlight_lock); -} - -#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) -static u32 intel_panel_get_backlight(struct intel_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 val = 0; - - mutex_lock(&dev_priv->backlight_lock); - - if (panel->backlight.enabled) - val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector)); - - mutex_unlock(&dev_priv->backlight_lock); - - drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val); - return val; -} - -/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */ -static u32 scale_user_to_hw(struct intel_connector *connector, - u32 user_level, u32 user_max) -{ - struct intel_panel *panel = &connector->panel; - - return scale(user_level, 0, user_max, - panel->backlight.min, panel->backlight.max); -} - -/* set backlight brightness to level in range [0..max], scaling wrt hw min */ -static void intel_panel_set_backlight(const struct drm_connector_state *conn_state, - u32 user_level, u32 user_max) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 hw_level; - - if (!panel->backlight.present) - return; - - mutex_lock(&dev_priv->backlight_lock); - - drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); - - hw_level = scale_user_to_hw(connector, user_level, user_max); - panel->backlight.level = hw_level; - - if (panel->backlight.enabled) - intel_panel_actually_set_backlight(conn_state, hw_level); - - mutex_unlock(&dev_priv->backlight_lock); -} - -static int intel_backlight_device_update_status(struct backlight_device *bd) -{ - struct intel_connector *connector = bl_get_data(bd); - struct intel_panel *panel = &connector->panel; - struct drm_device *dev = connector->base.dev; - - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", - bd->props.brightness, bd->props.max_brightness); - intel_panel_set_backlight(connector->base.state, bd->props.brightness, - bd->props.max_brightness); - - /* - * Allow flipping bl_power as a sub-state of enabled. Sadly the - * backlight class device does not make it easy to differentiate - * between callbacks for brightness and bl_power, so our backlight_power - * callback needs to take this into account. 
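- * (For example, setting bl_power to FB_BLANK_POWERDOWN while enabled makes
- * the hook below turn the backlight off even though brightness stays nonzero.)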
- */ - if (panel->backlight.enabled) { - if (panel->backlight.power) { - bool enable = bd->props.power == FB_BLANK_UNBLANK && - bd->props.brightness != 0; - panel->backlight.power(connector, enable); - } - } else { - bd->props.power = FB_BLANK_POWERDOWN; - } - - drm_modeset_unlock(&dev->mode_config.connection_mutex); - return 0; -} - -static int intel_backlight_device_get_brightness(struct backlight_device *bd) -{ - struct intel_connector *connector = bl_get_data(bd); - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - intel_wakeref_t wakeref; - int ret = 0; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - u32 hw_level; - - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - hw_level = intel_panel_get_backlight(connector); - ret = scale_hw_to_user(connector, - hw_level, bd->props.max_brightness); - - drm_modeset_unlock(&dev->mode_config.connection_mutex); - } - - return ret; -} - -static const struct backlight_ops intel_backlight_device_ops = { - .update_status = intel_backlight_device_update_status, - .get_brightness = intel_backlight_device_get_brightness, -}; - -int intel_backlight_device_register(struct intel_connector *connector) -{ - struct drm_i915_private *i915 = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - struct backlight_properties props; - struct backlight_device *bd; - const char *name; - int ret = 0; - - if (WARN_ON(panel->backlight.device)) - return -ENODEV; - - if (!panel->backlight.present) - return 0; - - WARN_ON(panel->backlight.max == 0); - - memset(&props, 0, sizeof(props)); - props.type = BACKLIGHT_RAW; - - /* - * Note: Everything should work even if the backlight device max - * presented to the userspace is arbitrarily chosen. - */ - props.max_brightness = panel->backlight.max; - props.brightness = scale_hw_to_user(connector, - panel->backlight.level, - props.max_brightness); - - if (panel->backlight.enabled) - props.power = FB_BLANK_UNBLANK; - else - props.power = FB_BLANK_POWERDOWN; - - name = kstrdup("intel_backlight", GFP_KERNEL); - if (!name) - return -ENOMEM; - - bd = backlight_device_register(name, connector->base.kdev, connector, - &intel_backlight_device_ops, &props); - - /* - * Using the same name independent of the drm device or connector - * prevents registration of multiple backlight devices in the - * driver. However, we need to use the default name for backward - * compatibility. Use unique names for subsequent backlight devices as a - * fallback when the default name already exists. 
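- * (The fallback name has the form "card%d-%s-backlight", e.g.
- * "card0-eDP-1-backlight" for a typical eDP panel on the first card.)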
- */ - if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) { - kfree(name); - name = kasprintf(GFP_KERNEL, "card%d-%s-backlight", - i915->drm.primary->index, connector->base.name); - if (!name) - return -ENOMEM; - - bd = backlight_device_register(name, connector->base.kdev, connector, - &intel_backlight_device_ops, &props); - } - - if (IS_ERR(bd)) { - drm_err(&i915->drm, - "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n", - connector->base.base.id, connector->base.name, name, PTR_ERR(bd)); - ret = PTR_ERR(bd); - goto out; - } - - panel->backlight.device = bd; - - drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s] backlight device %s registered\n", - connector->base.base.id, connector->base.name, name); - -out: - kfree(name); - - return ret; -} - -void intel_backlight_device_unregister(struct intel_connector *connector) -{ - struct intel_panel *panel = &connector->panel; - - if (panel->backlight.device) { - backlight_device_unregister(panel->backlight.device); - panel->backlight.device = NULL; - } -} -#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ - -/* - * CNP: PWM clock frequency is 19.2 MHz or 24 MHz. - * PWM increment = 1 - */ -static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq), - pwm_freq_hz); -} - -/* - * BXT: PWM clock frequency = 19.2 MHz. - */ -static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) -{ - return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz); -} - -/* - * SPT: This value represents the period of the PWM stream in clock periods - * multiplied by 16 (default increment) or 128 (alternate increment selected in - * SCHICKEN_1 bit 0). PWM clock is 24 MHz. - */ -static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) -{ - struct intel_panel *panel = &connector->panel; - u32 mul; - - if (panel->backlight.alternate_pwm_increment) - mul = 128; - else - mul = 16; - - return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul); -} - -/* - * LPT: This value represents the period of the PWM stream in clock periods - * multiplied by 128 (default increment) or 16 (alternate increment, selected in - * LPT SOUTH_CHICKEN2 register bit 5). - */ -static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 mul, clock; - - if (panel->backlight.alternate_pwm_increment) - mul = 16; - else - mul = 128; - - if (HAS_PCH_LPT_H(dev_priv)) - clock = MHz(135); /* LPT:H */ - else - clock = MHz(24); /* LPT:LP */ - - return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); -} - -/* - * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH - * display raw clocks multiplied by 128. - */ -static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - - return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq), - pwm_freq_hz * 128); -} - -/* - * Gen2: This field determines the number of time base events (display core - * clock frequency/32) in total for a complete cycle of modulated backlight - * control. - * - * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock) - * divided by 32. 
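- * E.g. with a 200 MHz core display clock, the default 200 Hz PWM target
- * gives DIV_ROUND_CLOSEST(200000000, 200 * 32) = 31250 time base events.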
- */ -static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - int clock; - - if (IS_PINEVIEW(dev_priv)) - clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); - else - clock = KHz(dev_priv->cdclk.hw.cdclk); - - return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32); -} - -/* - * Gen4: This value represents the period of the PWM stream in display core - * clocks ([DevCTG] HRAW clocks) multiplied by 128. - * - */ -static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) +int intel_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - int clock; - - if (IS_G4X(dev_priv)) - clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); - else - clock = KHz(dev_priv->cdclk.hw.cdclk); - - return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128); -} - -/* - * VLV: This value represents the period of the PWM stream in display core - * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks - * multiplied by 16. CHV uses a 19.2MHz S0IX clock. - */ -static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - int mul, clock; - - if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) { - if (IS_CHERRYVIEW(dev_priv)) - clock = KHz(19200); - else - clock = MHz(25); - mul = 16; - } else { - clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); - mul = 128; - } - - return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); -} - -static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv) -{ - u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; - - if (pwm_freq_hz) { - drm_dbg_kms(&dev_priv->drm, - "VBT defined backlight frequency %u Hz\n", - pwm_freq_hz); - } else { - pwm_freq_hz = 200; - drm_dbg_kms(&dev_priv->drm, - "default backlight frequency %u Hz\n", - pwm_freq_hz); - } - - return pwm_freq_hz; -} - -static u32 get_backlight_max_vbt(struct intel_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv); - u32 pwm; - - if (!panel->backlight.pwm_funcs->hz_to_pwm) { - drm_dbg_kms(&dev_priv->drm, - "backlight frequency conversion not supported\n"); - return 0; - } - - pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz); - if (!pwm) { - drm_dbg_kms(&dev_priv->drm, - "backlight frequency conversion failed\n"); - return 0; - } - - return pwm; -} - -/* - * Note: The setup hooks can't assume pipe is set! - */ -static u32 get_backlight_min_vbt(struct intel_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - int min; - - drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0); - - /* - * XXX: If the vbt value is 255, it makes min equal to max, which leads - * to problems. There are such machines out there. Either our - * interpretation is wrong or the vbt has bogus data. Or both. Safeguard - * against this by letting the minimum be at most (arbitrarily chosen) - * 25% of the max. 
- */ - min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64); - if (min != dev_priv->vbt.backlight.min_brightness) { - drm_dbg_kms(&dev_priv->drm, - "clamping VBT min backlight %d/255 to %d/255\n", - dev_priv->vbt.backlight.min_brightness, min); - } - - /* vbt value is a coefficient in range [0..255] */ - return scale(min, 0, 255, 0, panel->backlight.pwm_level_max); -} - -static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; - bool alt, cpu_mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (HAS_PCH_LPT(dev_priv)) - alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY; + if (HAS_GMCH(i915)) + return gmch_panel_fitting(crtc_state, conn_state); else - alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY; - panel->backlight.alternate_pwm_increment = alt; - - pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); - panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; - - pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2); - panel->backlight.pwm_level_max = pch_ctl2 >> 16; - - cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); - - if (!panel->backlight.pwm_level_max) - panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); - - if (!panel->backlight.pwm_level_max) - return -ENODEV; - - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE; - - cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) && - !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) && - (cpu_ctl2 & BLM_PWM_ENABLE); - - if (cpu_mode) { - val = pch_get_backlight(connector, unused); - - drm_dbg_kms(&dev_priv->drm, - "CPU backlight register was enabled, switching to PCH override\n"); - - /* Write converted CPU PWM value to PCH override register */ - lpt_set_backlight(connector->base.state, val); - intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, - pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); - - intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, - cpu_ctl2 & ~BLM_PWM_ENABLE); - } - - return 0; -} - -static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 cpu_ctl2, pch_ctl1, pch_ctl2; - - pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); - panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; - - pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2); - panel->backlight.pwm_level_max = pch_ctl2 >> 16; - - if (!panel->backlight.pwm_level_max) - panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); - - if (!panel->backlight.pwm_level_max) - return -ENODEV; - - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); - panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) && - (pch_ctl1 & BLM_PCH_PWM_ENABLE); - - return 0; -} - -static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 ctl, val; - - ctl = intel_de_read(dev_priv, BLC_PWM_CTL); - - if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) - 
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; - - if (IS_PINEVIEW(dev_priv)) - panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV; - - panel->backlight.pwm_level_max = ctl >> 17; - - if (!panel->backlight.pwm_level_max) { - panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); - panel->backlight.pwm_level_max >>= 1; - } - - if (!panel->backlight.pwm_level_max) - return -ENODEV; - - if (panel->backlight.combination_mode) - panel->backlight.pwm_level_max *= 0xff; - - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - val = i9xx_get_backlight(connector, unused); - val = intel_panel_invert_pwm_level(connector, val); - val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max); - - panel->backlight.pwm_enabled = val != 0; - - return 0; -} - -static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 ctl, ctl2; - - ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2); - panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE; - panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; - - ctl = intel_de_read(dev_priv, BLC_PWM_CTL); - panel->backlight.pwm_level_max = ctl >> 16; - - if (!panel->backlight.pwm_level_max) - panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); - - if (!panel->backlight.pwm_level_max) - return -ENODEV; - - if (panel->backlight.combination_mode) - panel->backlight.pwm_level_max *= 0xff; - - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; - - return 0; -} - -static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 ctl, ctl2; - - if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) - return -ENODEV; - - ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); - panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; - - ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)); - panel->backlight.pwm_level_max = ctl >> 16; - - if (!panel->backlight.pwm_level_max) - panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); - - if (!panel->backlight.pwm_level_max) - return -ENODEV; - - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; - - return 0; -} - -static int -bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 pwm_ctl, val; - - panel->backlight.controller = dev_priv->vbt.backlight.controller; - - pwm_ctl = intel_de_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - - /* Controller 1 uses the utility pin. 
*/ - if (panel->backlight.controller == 1) { - val = intel_de_read(dev_priv, UTIL_PIN_CTL); - panel->backlight.util_pin_active_low = - val & UTIL_PIN_POLARITY; - } - - panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; - panel->backlight.pwm_level_max = - intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller)); - - if (!panel->backlight.pwm_level_max) - panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); - - if (!panel->backlight.pwm_level_max) - return -ENODEV; - - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; - - return 0; -} - -static int -cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - u32 pwm_ctl; - - /* - * CNP has the BXT implementation of backlight, but with only one - * controller. TODO: ICP has multiple controllers but we only use - * controller 0 for now. - */ - panel->backlight.controller = 0; - - pwm_ctl = intel_de_read(dev_priv, - BXT_BLC_PWM_CTL(panel->backlight.controller)); - - panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; - panel->backlight.pwm_level_max = - intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller)); - - if (!panel->backlight.pwm_level_max) - panel->backlight.pwm_level_max = get_backlight_max_vbt(connector); - - if (!panel->backlight.pwm_level_max) - return -ENODEV; - - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; - - return 0; -} - -static int ext_pwm_setup_backlight(struct intel_connector *connector, - enum pipe pipe) -{ - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_panel *panel = &connector->panel; - const char *desc; - u32 level; - - /* Get the right PWM chip for DSI backlight according to VBT */ - if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) { - panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight"); - desc = "PMIC"; - } else { - panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight"); - desc = "SoC"; - } - - if (IS_ERR(panel->backlight.pwm)) { - drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n", - desc); - panel->backlight.pwm = NULL; - return -ENODEV; - } - - panel->backlight.pwm_level_max = 100; /* 100% */ - panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); - - if (pwm_is_enabled(panel->backlight.pwm)) { - /* PWM is already enabled, use existing settings */ - pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state); - - level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state, - 100); - level = intel_panel_invert_pwm_level(connector, level); - panel->backlight.pwm_enabled = true; - - drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", - NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, - get_vbt_pwm_freq(dev_priv), level); - } else { - /* Set period from VBT frequency, leave other settings at 0. 
*/ - panel->backlight.pwm_state.period = - NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv); - } - - drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n", - desc); - return 0; -} - -static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_panel *panel = &connector->panel; - - panel->backlight.pwm_funcs->set(conn_state, - intel_panel_invert_pwm_level(connector, level)); + return pch_panel_fitting(crtc_state, conn_state); } -static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe) -{ - struct intel_panel *panel = &connector->panel; - - return intel_panel_invert_pwm_level(connector, - panel->backlight.pwm_funcs->get(connector, pipe)); -} - -static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_panel *panel = &connector->panel; - - panel->backlight.pwm_funcs->enable(crtc_state, conn_state, - intel_panel_invert_pwm_level(connector, level)); -} - -static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_panel *panel = &connector->panel; - - panel->backlight.pwm_funcs->disable(conn_state, - intel_panel_invert_pwm_level(connector, level)); -} - -static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) -{ - struct intel_panel *panel = &connector->panel; - int ret = panel->backlight.pwm_funcs->setup(connector, pipe); - - if (ret < 0) - return ret; - - panel->backlight.min = panel->backlight.pwm_level_min; - panel->backlight.max = panel->backlight.pwm_level_max; - panel->backlight.level = intel_pwm_get_backlight(connector, pipe); - panel->backlight.enabled = panel->backlight.pwm_enabled; - - return 0; -} - -void intel_panel_update_backlight(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_panel *panel = &connector->panel; - - if (!panel->backlight.present) - return; - - mutex_lock(&dev_priv->backlight_lock); - if (!panel->backlight.enabled) - __intel_panel_enable_backlight(crtc_state, conn_state); - - mutex_unlock(&dev_priv->backlight_lock); -} - -int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) +enum drm_connector_status +intel_panel_detect(struct drm_connector *connector, bool force) { - struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_panel *panel = &intel_connector->panel; - int ret; - - if (!dev_priv->vbt.backlight.present) { - if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { - drm_dbg_kms(&dev_priv->drm, - "no backlight present per VBT, but present per quirk\n"); - } else { - drm_dbg_kms(&dev_priv->drm, - "no backlight present per VBT\n"); - return 0; - } - } - - /* ensure intel_panel has been initialized first */ - if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs)) - return -ENODEV; - - /* set level and max in panel struct */ - 
mutex_lock(&dev_priv->backlight_lock); - ret = panel->backlight.funcs->setup(intel_connector, pipe); - mutex_unlock(&dev_priv->backlight_lock); - - if (ret) { - drm_dbg_kms(&dev_priv->drm, - "failed to setup backlight for connector %s\n", - connector->name); - return ret; - } - - panel->backlight.present = true; - - drm_dbg_kms(&dev_priv->drm, - "Connector %s backlight initialized, %s, brightness %u/%u\n", - connector->name, - enableddisabled(panel->backlight.enabled), - panel->backlight.level, panel->backlight.max); - - return 0; -} + struct drm_i915_private *i915 = to_i915(connector->dev); -static void intel_panel_destroy_backlight(struct intel_panel *panel) -{ - /* dispose of the pwm */ - if (panel->backlight.pwm) - pwm_put(panel->backlight.pwm); + if (!INTEL_DISPLAY_ENABLED(i915)) + return connector_status_disconnected; - panel->backlight.present = false; + return connector_status_connected; } -static const struct intel_panel_bl_funcs bxt_pwm_funcs = { - .setup = bxt_setup_backlight, - .enable = bxt_enable_backlight, - .disable = bxt_disable_backlight, - .set = bxt_set_backlight, - .get = bxt_get_backlight, - .hz_to_pwm = bxt_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs cnp_pwm_funcs = { - .setup = cnp_setup_backlight, - .enable = cnp_enable_backlight, - .disable = cnp_disable_backlight, - .set = bxt_set_backlight, - .get = bxt_get_backlight, - .hz_to_pwm = cnp_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs lpt_pwm_funcs = { - .setup = lpt_setup_backlight, - .enable = lpt_enable_backlight, - .disable = lpt_disable_backlight, - .set = lpt_set_backlight, - .get = lpt_get_backlight, - .hz_to_pwm = lpt_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs spt_pwm_funcs = { - .setup = lpt_setup_backlight, - .enable = lpt_enable_backlight, - .disable = lpt_disable_backlight, - .set = lpt_set_backlight, - .get = lpt_get_backlight, - .hz_to_pwm = spt_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs pch_pwm_funcs = { - .setup = pch_setup_backlight, - .enable = pch_enable_backlight, - .disable = pch_disable_backlight, - .set = pch_set_backlight, - .get = pch_get_backlight, - .hz_to_pwm = pch_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs ext_pwm_funcs = { - .setup = ext_pwm_setup_backlight, - .enable = ext_pwm_enable_backlight, - .disable = ext_pwm_disable_backlight, - .set = ext_pwm_set_backlight, - .get = ext_pwm_get_backlight, -}; - -static const struct intel_panel_bl_funcs vlv_pwm_funcs = { - .setup = vlv_setup_backlight, - .enable = vlv_enable_backlight, - .disable = vlv_disable_backlight, - .set = vlv_set_backlight, - .get = vlv_get_backlight, - .hz_to_pwm = vlv_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs i965_pwm_funcs = { - .setup = i965_setup_backlight, - .enable = i965_enable_backlight, - .disable = i965_disable_backlight, - .set = i9xx_set_backlight, - .get = i9xx_get_backlight, - .hz_to_pwm = i965_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs i9xx_pwm_funcs = { - .setup = i9xx_setup_backlight, - .enable = i9xx_enable_backlight, - .disable = i9xx_disable_backlight, - .set = i9xx_set_backlight, - .get = i9xx_get_backlight, - .hz_to_pwm = i9xx_hz_to_pwm, -}; - -static const struct intel_panel_bl_funcs pwm_bl_funcs = { - .setup = intel_pwm_setup_backlight, - .enable = intel_pwm_enable_backlight, - .disable = intel_pwm_disable_backlight, - .set = intel_pwm_set_backlight, - .get = intel_pwm_get_backlight, -}; - -/* Set up chip specific backlight functions */ -static void 
-intel_panel_init_backlight_funcs(struct intel_panel *panel) +enum drm_mode_status +intel_panel_mode_valid(struct intel_connector *connector, + const struct drm_display_mode *mode) { - struct intel_connector *connector = - container_of(panel, struct intel_connector, panel); - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode; - if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI && - intel_dsi_dcs_init_backlight_funcs(connector) == 0) - return; - - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - panel->backlight.pwm_funcs = &bxt_pwm_funcs; - } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) { - panel->backlight.pwm_funcs = &cnp_pwm_funcs; - } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) { - if (HAS_PCH_LPT(dev_priv)) - panel->backlight.pwm_funcs = &lpt_pwm_funcs; - else - panel->backlight.pwm_funcs = &spt_pwm_funcs; - } else if (HAS_PCH_SPLIT(dev_priv)) { - panel->backlight.pwm_funcs = &pch_pwm_funcs; - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) { - panel->backlight.pwm_funcs = &ext_pwm_funcs; - } else { - panel->backlight.pwm_funcs = &vlv_pwm_funcs; - } - } else if (DISPLAY_VER(dev_priv) == 4) { - panel->backlight.pwm_funcs = &i965_pwm_funcs; - } else { - panel->backlight.pwm_funcs = &i9xx_pwm_funcs; - } - - if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && - intel_dp_aux_init_backlight_funcs(connector) == 0) - return; + if (!fixed_mode) + return MODE_OK; - /* We're using a standard PWM backlight interface */ - panel->backlight.funcs = &pwm_bl_funcs; -} + if (mode->hdisplay != fixed_mode->hdisplay) + return MODE_PANEL; -enum drm_connector_status -intel_panel_detect(struct drm_connector *connector, bool force) -{ - struct drm_i915_private *i915 = to_i915(connector->dev); + if (mode->vdisplay != fixed_mode->vdisplay) + return MODE_PANEL; - if (!INTEL_DISPLAY_ENABLED(i915)) - return connector_status_disconnected; + if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(fixed_mode)) + return MODE_PANEL; - return connector_status_connected; + return MODE_OK; } int intel_panel_init(struct intel_panel *panel, struct drm_display_mode *fixed_mode, struct drm_display_mode *downclock_mode) { - intel_panel_init_backlight_funcs(panel); + intel_backlight_init_funcs(panel); panel->fixed_mode = fixed_mode; panel->downclock_mode = downclock_mode; @@ -2245,7 +542,7 @@ void intel_panel_fini(struct intel_panel *panel) struct intel_connector *intel_connector = container_of(panel, struct intel_connector, panel); - intel_panel_destroy_backlight(panel); + intel_backlight_destroy(panel); if (panel->fixed_mode) drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 1d340f77bffc..d50b3f7e9e58 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -8,15 +8,13 @@ #include <linux/types.h> -#include "intel_display.h" - +enum drm_connector_status; struct drm_connector; struct drm_connector_state; struct drm_display_mode; +struct drm_i915_private; struct intel_connector; -struct intel_crtc; struct intel_crtc_state; -struct intel_encoder; struct intel_panel; int intel_panel_init(struct intel_panel *panel, @@ -25,23 +23,16 @@ int intel_panel_init(struct intel_panel *panel, void intel_panel_fini(struct intel_panel *panel); enum drm_connector_status 
intel_panel_detect(struct drm_connector *connector, bool force); -void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, +bool intel_panel_use_ssc(struct drm_i915_private *i915); +void intel_panel_fixed_mode(const struct drm_display_mode *fixed_mode, struct drm_display_mode *adjusted_mode); -int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); -int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); -void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state, - u32 level, u32 max); -int intel_panel_setup_backlight(struct drm_connector *connector, - enum pipe pipe); -void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); -void intel_panel_update_backlight(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); -void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); +enum drm_mode_status +intel_panel_mode_valid(struct intel_connector *connector, + const struct drm_display_mode *mode); +int intel_panel_fitting(struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +int intel_panel_compute_config(struct intel_connector *connector, + struct drm_display_mode *adjusted_mode); struct drm_display_mode * intel_panel_edid_downclock_mode(struct intel_connector *connector, const struct drm_display_mode *fixed_mode); @@ -49,22 +40,5 @@ struct drm_display_mode * intel_panel_edid_fixed_mode(struct intel_connector *connector); struct drm_display_mode * intel_panel_vbt_fixed_mode(struct intel_connector *connector); -void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level); -u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level); -u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level); -u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val); - -#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) -int intel_backlight_device_register(struct intel_connector *connector); -void intel_backlight_device_unregister(struct intel_connector *connector); -#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ -static inline int intel_backlight_device_register(struct intel_connector *connector) -{ - return 0; -} -static inline void intel_backlight_device_unregister(struct intel_connector *connector) -{ -} -#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ #endif /* __INTEL_PANEL_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c new file mode 100644 index 000000000000..a55c4bfacd0d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "g4x_dp.h" +#include "intel_crt.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_fdi.h" +#include "intel_lvds.h" +#include "intel_pch_display.h" +#include "intel_pch_refclk.h" +#include "intel_pps.h" +#include "intel_sdvo.h" + +static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t dp_reg) +{ + enum pipe port_pipe; + bool state; + + state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); + + I915_STATE_WARN(state && port_pipe 
== pipe, + "PCH DP %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); + + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH DP %c still using transcoder B\n", + port_name(port)); +} + +static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t hdmi_reg) +{ + enum pipe port_pipe; + bool state; + + state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); + + I915_STATE_WARN(state && port_pipe == pipe, + "PCH HDMI %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); + + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH HDMI %c still using transcoder B\n", + port_name(port)); +} + +static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + enum pipe port_pipe; + + assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); + assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); + assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); + + I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && + port_pipe == pipe, + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && + port_pipe == pipe, + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + /* PCH SDVOB multiplex with HDMIB */ + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); +} + +static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 val; + bool enabled; + + val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); + enabled = !!(val & TRANS_ENABLE); + I915_STATE_WARN(enabled, + "transcoder assertion failed, should be off on pipe %c but is still active\n", + pipe_name(pipe)); +} + +static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, + enum pipe pch_transcoder) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), + intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), + intel_de_read(dev_priv, HBLANK(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), + intel_de_read(dev_priv, HSYNC(cpu_transcoder))); + + intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), + intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), + intel_de_read(dev_priv, VBLANK(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), + intel_de_read(dev_priv, VSYNC(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), + intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); +} + +static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 val, pipeconf_val; + + /* Make sure PCH DPLL is enabled */ + assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); + + /* 
FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, pipe); + assert_fdi_rx_enabled(dev_priv, pipe); + + if (HAS_PCH_CPT(dev_priv)) { + reg = TRANS_CHICKEN2(pipe); + val = intel_de_read(dev_priv, reg); + /* + * Workaround: Set the timing override bit + * before enabling the pch transcoder. + */ + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + intel_de_write(dev_priv, reg, val); + } + + reg = PCH_TRANSCONF(pipe); + val = intel_de_read(dev_priv, reg); + pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); + + if (HAS_PCH_IBX(dev_priv)) { + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_FRAME_START_DELAY_MASK; + val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + + /* + * Make the BPC in transcoder be consistent with + * that in pipeconf reg. For HDMI we must use 8bpc + * here for both 8bpc and 12bpc. + */ + val &= ~PIPECONF_BPC_MASK; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + val |= PIPECONF_8BPC; + else + val |= pipeconf_val & PIPECONF_BPC_MASK; + } + + val &= ~TRANS_INTERLACE_MASK; + if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { + if (HAS_PCH_IBX(dev_priv) && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) + val |= TRANS_LEGACY_INTERLACED_ILK; + else + val |= TRANS_INTERLACED; + } else { + val |= TRANS_PROGRESSIVE; + } + + intel_de_write(dev_priv, reg, val | TRANS_ENABLE); + if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) + drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", + pipe_name(pipe)); +} + +static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 val; + + /* FDI relies on the transcoder */ + assert_fdi_tx_disabled(dev_priv, pipe); + assert_fdi_rx_disabled(dev_priv, pipe); + + /* Ports must be off as well */ + assert_pch_ports_disabled(dev_priv, pipe); + + reg = PCH_TRANSCONF(pipe); + val = intel_de_read(dev_priv, reg); + val &= ~TRANS_ENABLE; + intel_de_write(dev_priv, reg, val); + /* wait for PCH transcoder off, transcoder state */ + if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) + drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", + pipe_name(pipe)); + + if (HAS_PCH_CPT(dev_priv)) { + /* Workaround: Clear the timing override chicken bit again. */ + reg = TRANS_CHICKEN2(pipe); + val = intel_de_read(dev_priv, reg); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + intel_de_write(dev_priv, reg, val); + } +} + +/* + * Enable PCH resources required for PCH ports: + * - PCH PLLs + * - FDI training & RX/TX + * - update transcoder timings + * - DP transcoding bits + * - transcoder + */ +void ilk_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + enum pipe pipe = crtc->pipe; + u32 temp; + + assert_pch_transcoder_disabled(dev_priv, pipe); + + /* For PCH output, training FDI link */ + intel_fdi_link_train(crtc, crtc_state); + + /* + * We need to program the right clock selection + * before writing the pixel multiplier into the DPLL. 
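+ * (The PCH_DPLL_SEL write below muxes DPLL A or DPLL B onto this
+ * pipe's PCH transcoder.)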
+ */ + if (HAS_PCH_CPT(dev_priv)) { + u32 sel; + + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); + temp |= TRANS_DPLL_ENABLE(pipe); + sel = TRANS_DPLLB_SEL(pipe); + if (crtc_state->shared_dpll == + intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) + temp |= sel; + else + temp &= ~sel; + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + } + + /* + * XXX: pch pll's can be enabled any time before we enable the PCH + * transcoder, and we actually should do this to not upset any PCH + * transcoders that already use the clock when we share it. + * + * Note that enable_shared_dpll tries to do the right thing, but + * get_shared_dpll unconditionally resets the pll - we need that + * to have the right LVDS enable sequence. + */ + intel_enable_shared_dpll(crtc_state); + + /* set transcoder timing, panel must allow it */ + assert_pps_unlocked(dev_priv, pipe); + ilk_pch_transcoder_set_timings(crtc_state, pipe); + + intel_fdi_normal_train(crtc); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev_priv) && + intel_crtc_has_dp_encoder(crtc_state)) { + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; + i915_reg_t reg = TRANS_DP_CTL(pipe); + enum port port; + + temp = intel_de_read(dev_priv, reg); + temp &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK | + TRANS_DP_BPC_MASK); + temp |= TRANS_DP_OUTPUT_ENABLE; + temp |= bpc << 9; /* same format but at 11:9 */ + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; + + port = intel_get_crtc_new_encoder(state, crtc_state)->port; + drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D); + temp |= TRANS_DP_PORT_SEL(port); + + intel_de_write(dev_priv, reg, temp); + } + + ilk_enable_pch_transcoder(crtc_state); +} + +void ilk_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + ilk_fdi_disable(crtc); +} + +void ilk_pch_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + ilk_disable_pch_transcoder(crtc); + + if (HAS_PCH_CPT(dev_priv)) { + i915_reg_t reg; + u32 temp; + + /* disable TRANS_DP_CTL */ + reg = TRANS_DP_CTL(pipe); + temp = intel_de_read(dev_priv, reg); + temp &= ~(TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_PORT_SEL_MASK); + temp |= TRANS_DP_PORT_SEL_NONE; + intel_de_write(dev_priv, reg, temp); + + /* disable DPLL_SEL */ + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); + temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + } + + ilk_fdi_pll_disable(crtc); +} + +static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + /* read out port_clock from the DPLL */ + i9xx_crtc_clock_get(crtc, crtc_state); + + /* + * In case there is an active pipe without active ports, + * we may need some idea for the dotclock anyway. + * Calculate one based on the FDI configuration. 
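+ * (intel_dotclock_calculate() scales the FDI link frequency by the
+ * link M/N ratio recorded in fdi_m_n to recover a pixel clock.)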
+ */ + crtc_state->hw.adjusted_mode.crtc_clock = + intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state), + &crtc_state->fdi_m_n); +} + +void ilk_pch_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll; + enum pipe pipe = crtc->pipe; + enum intel_dpll_id pll_id; + bool pll_active; + u32 tmp; + + if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0) + return; + + crtc_state->has_pch_encoder = true; + + tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ilk_get_fdi_m_n_config(crtc, crtc_state); + + if (HAS_PCH_IBX(dev_priv)) { + /* + * The pipe->pch transcoder and pch transcoder->pll + * mapping is fixed. + */ + pll_id = (enum intel_dpll_id) pipe; + } else { + tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); + if (tmp & TRANS_DPLLB_SEL(pipe)) + pll_id = DPLL_ID_PCH_PLL_B; + else + pll_id = DPLL_ID_PCH_PLL_A; + } + + crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id); + pll = crtc_state->shared_dpll; + + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &crtc_state->dpll_hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); + + tmp = crtc_state->dpll_hw_state.dpll; + crtc_state->pixel_multiplier = + ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) + >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; + + ilk_pch_clock_get(crtc_state); +} + +static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + u32 val, pipeconf_val; + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); + assert_fdi_rx_enabled(dev_priv, PIPE_A); + + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); + /* Workaround: set timing override bit. */ + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); + + val = TRANS_ENABLE; + pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); + + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == + PIPECONF_INTERLACED_ILK) + val |= TRANS_INTERLACED; + else + val |= TRANS_PROGRESSIVE; + + intel_de_write(dev_priv, LPT_TRANSCONF, val); + if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 100)) + drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); +} + +static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = intel_de_read(dev_priv, LPT_TRANSCONF); + val &= ~TRANS_ENABLE; + intel_de_write(dev_priv, LPT_TRANSCONF, val); + /* wait for PCH transcoder off, transcoder state */ + if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 50)) + drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); + + /* Workaround: clear timing override bit. 
*/ + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); +} + +void lpt_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + assert_pch_transcoder_disabled(dev_priv, PIPE_A); + + lpt_program_iclkip(crtc_state); + + /* Set transcoder timing. */ + ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); + + lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); +} + +void lpt_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + lpt_disable_pch_transcoder(dev_priv); + + lpt_disable_iclkip(dev_priv); +} + +void lpt_pch_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 tmp; + + if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0) + return; + + crtc_state->has_pch_encoder = true; + + tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ilk_get_fdi_m_n_config(crtc, crtc_state); + + crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); +} diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h new file mode 100644 index 000000000000..2c387fe3a467 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_PCH_DISPLAY_H_ +#define _INTEL_PCH_DISPLAY_H_ + +struct intel_atomic_state; +struct intel_crtc; +struct intel_crtc_state; + +void ilk_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_get_config(struct intel_crtc_state *crtc_state); + +void lpt_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void lpt_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void lpt_pch_get_config(struct intel_crtc_state *crtc_state); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c new file mode 100644 index 000000000000..b688fd87e3da --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_panel.h" +#include "intel_pch_refclk.h" +#include "intel_sbi.h" + +static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) +{ + u32 tmp; + + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); + tmp |= FDI_MPHY_IOSFSB_RESET_CTL; + intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + + if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); + + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); + tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; + intel_de_write(dev_priv, 
SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+}
+
+/* WaMPhyProgramming:hsw */
+static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ lpt_fdi_reset_mphy(dev_priv);
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
+{
+ u32 temp;
+
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ temp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
+
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+/* Program iCLKIP clock to the desired frequency */
+void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int clock = crtc_state->hw.adjusted_mode.crtc_clock;
+ u32 divsel, phaseinc, auxdiv, phasedir = 0;
+ u32 temp;
+
+ lpt_disable_iclkip(dev_priv);
+
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the adjusted_mode->crtc_clock is in kHz. To get the
+ * divisors, it is necessary to divide one by the other, so we
+ * convert the virtual clock precision to kHz here for higher
+ * precision.
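+ *
+ * For example, with an illustrative crtc_clock of 108000 kHz and
+ * auxdiv = 0: desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000)
+ * = 1600, so divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0.
+ * lpt_get_iclkip() below inverts this: (23 + 2) * 64 + 0 = 1600 and
+ * 172800000 / 1600 = 108000 kHz.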
+ */ + for (auxdiv = 0; auxdiv < 2; auxdiv++) { + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor; + + desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + clock << auxdiv); + divsel = (desired_divisor / iclk_pi_range) - 2; + phaseinc = desired_divisor % iclk_pi_range; + + /* + * Near 20MHz is a corner case which is + * out of range for the 7-bit divisor + */ + if (divsel <= 0x7f) + break; + } + + /* This should not happen with any sane values */ + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & + ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & + ~SBI_SSCDIVINTPHASE_INCVAL_MASK); + + drm_dbg_kms(&dev_priv->drm, + "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", + clock, auxdiv, divsel, phasedir, phaseinc); + + mutex_lock(&dev_priv->sb_lock); + + /* Program SSCDIVINTPHASE6 */ + temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); + temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; + temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); + temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; + temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); + temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); + temp |= SBI_SSCDIVINTPHASE_PROPAGATE; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); + + /* Program SSCAUXDIV */ + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); + temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); + intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); + + /* Enable modulator and associated divider */ + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + temp &= ~SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); + + /* Wait for initialization time */ + udelay(24); + + intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); +} + +int lpt_get_iclkip(struct drm_i915_private *dev_priv) +{ + u32 divsel, phaseinc, auxdiv; + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor; + u32 temp; + + if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) + return 0; + + mutex_lock(&dev_priv->sb_lock); + + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + if (temp & SBI_SSCCTL_DISABLE) { + mutex_unlock(&dev_priv->sb_lock); + return 0; + } + + temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); + divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> + SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; + phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> + SBI_SSCDIVINTPHASE_INCVAL_SHIFT; + + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> + SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; + + mutex_unlock(&dev_priv->sb_lock); + + desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; + + return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + desired_divisor << auxdiv); +} + +/* Implements 3 different sequences from BSpec chapter "Display iCLK + * Programming" based on the parameters passed: + * - Sequence to enable CLKOUT_DP + * - Sequence to enable CLKOUT_DP without spread + * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O + */ +static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, + bool with_spread, bool with_fdi) +{ + u32 reg, tmp; + + if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, + "FDI requires downspread\n")) + with_spread = true; + if 
(drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && + with_fdi, "LP PCH doesn't have FDI\n")) + with_fdi = false; + + mutex_lock(&dev_priv->sb_lock); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_DISABLE; + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + udelay(24); + + if (with_spread) { + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + if (with_fdi) + lpt_fdi_program_mphy(dev_priv); + } + + reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +/* Sequence to disable CLKOUT_DP */ +void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) +{ + u32 reg, tmp; + + mutex_lock(&dev_priv->sb_lock); + + reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + if (!(tmp & SBI_SSCCTL_DISABLE)) { + if (!(tmp & SBI_SSCCTL_PATHALT)) { + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + udelay(32); + } + tmp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + } + + mutex_unlock(&dev_priv->sb_lock); +} + +#define BEND_IDX(steps) ((50 + (steps)) / 5) + +static const u16 sscdivintphase[] = { + [BEND_IDX( 50)] = 0x3B23, + [BEND_IDX( 45)] = 0x3B23, + [BEND_IDX( 40)] = 0x3C23, + [BEND_IDX( 35)] = 0x3C23, + [BEND_IDX( 30)] = 0x3D23, + [BEND_IDX( 25)] = 0x3D23, + [BEND_IDX( 20)] = 0x3E23, + [BEND_IDX( 15)] = 0x3E23, + [BEND_IDX( 10)] = 0x3F23, + [BEND_IDX( 5)] = 0x3F23, + [BEND_IDX( 0)] = 0x0025, + [BEND_IDX( -5)] = 0x0025, + [BEND_IDX(-10)] = 0x0125, + [BEND_IDX(-15)] = 0x0125, + [BEND_IDX(-20)] = 0x0225, + [BEND_IDX(-25)] = 0x0225, + [BEND_IDX(-30)] = 0x0325, + [BEND_IDX(-35)] = 0x0325, + [BEND_IDX(-40)] = 0x0425, + [BEND_IDX(-45)] = 0x0425, + [BEND_IDX(-50)] = 0x0525, +}; + +/* + * Bend CLKOUT_DP + * steps -50 to 50 inclusive, in steps of 5 + * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) + * change in clock period = -(steps / 10) * 5.787 ps + */ +static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) +{ + u32 tmp; + int idx = BEND_IDX(steps); + + if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) + return; + + if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) + return; + + mutex_lock(&dev_priv->sb_lock); + + if (steps % 10 != 0) + tmp = 0xAAAAAAAB; + else + tmp = 0x00000000; + intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); + tmp &= 0xffff0000; + tmp |= sscdivintphase[idx]; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +#undef BEND_IDX + +static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) +{ + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, SPLL_CTL); + + if ((ctl & SPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + if (IS_BROADWELL(dev_priv) && + (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) + return true; + + return false; +} + +static 
bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, + enum intel_dpll_id id) +{ + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); + + if ((ctl & WRPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) + return true; + + if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && + (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + return false; +} + +static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + bool has_fdi = false; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_ANALOG: + has_fdi = true; + break; + default: + break; + } + } + + /* + * The BIOS may have decided to use the PCH SSC + * reference so we must not disable it until the + * relevant PLLs have stopped relying on it. We'll + * just leave the PCH SSC reference enabled in case + * any active PLL is using it. It will get disabled + * after runtime suspend if we don't have FDI. + * + * TODO: Move the whole reference clock handling + * to the modeset sequence proper so that we can + * actually enable/disable/reconfigure these things + * safely. To do that we need to introduce a real + * clock hierarchy. That would also allow us to do + * clock bending finally. + */ + dev_priv->pch_ssc_use = 0; + + if (spll_uses_pch_ssc(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { + drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { + drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); + } + + if (dev_priv->pch_ssc_use) + return; + + if (has_fdi) { + lpt_bend_clkout_dp(dev_priv, 0); + lpt_enable_clkout_dp(dev_priv, true, true); + } else { + lpt_disable_clkout_dp(dev_priv); + } +} + +static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + int i; + u32 val, final; + bool has_lvds = false; + bool has_cpu_edp = false; + bool has_panel = false; + bool has_ck505 = false; + bool can_ssc = false; + bool using_ssc_source = false; + + /* We need to take the global config into account */ + for_each_intel_encoder(&dev_priv->drm, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + has_panel = true; + has_lvds = true; + break; + case INTEL_OUTPUT_EDP: + has_panel = true; + if (encoder->port == PORT_A) + has_cpu_edp = true; + break; + default: + break; + } + } + + if (HAS_PCH_IBX(dev_priv)) { + has_ck505 = dev_priv->vbt.display_clock_mode; + can_ssc = has_ck505; + } else { + has_ck505 = false; + can_ssc = true; + } + + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + drm_dbg_kms(&dev_priv->drm, + "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); + + /* Ironlake: try to setup display ref clock before DPLL + * enabling. 
This is only under driver's control after + * PCH B stepping, previous chipset stepping should be + * ignoring this setting. + */ + val = intel_de_read(dev_priv, PCH_DREF_CONTROL); + + /* As we must carefully and slowly disable/enable each source in turn, + * compute the final state we want first and check if we need to + * make any changes at all. + */ + final = val; + final &= ~DREF_NONSPREAD_SOURCE_MASK; + if (has_ck505) + final |= DREF_NONSPREAD_CK505_ENABLE; + else + final |= DREF_NONSPREAD_SOURCE_ENABLE; + + final &= ~DREF_SSC_SOURCE_MASK; + final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + final &= ~DREF_SSC1_ENABLE; + + if (has_panel) { + final |= DREF_SSC_SOURCE_ENABLE; + + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_SSC1_ENABLE; + + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } else { + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } + } else if (using_ssc_source) { + final |= DREF_SSC_SOURCE_ENABLE; + final |= DREF_SSC1_ENABLE; + } + + if (final == val) + return; + + /* Always enable nonspread source */ + val &= ~DREF_NONSPREAD_SOURCE_MASK; + + if (has_ck505) + val |= DREF_NONSPREAD_CK505_ENABLE; + else + val |= DREF_NONSPREAD_SOURCE_ENABLE; + + if (has_panel) { + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_ENABLE; + + /* SSC must be turned on before enabling the CPU output */ + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); + val |= DREF_SSC1_ENABLE; + } else { + val &= ~DREF_SSC1_ENABLE; + } + + /* Get SSC going before enabling the outputs */ + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Enable CPU source on CPU attached eDP */ + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + drm_dbg_kms(&dev_priv->drm, + "Using SSC on eDP\n"); + val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + } else { + val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } + } else { + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + } else { + drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Turn off CPU output */ + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + + if (!using_ssc_source) { + drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); + + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; + + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + } + } + + BUG_ON(val != final); +} + +/* + * Initialize reference clocks when the driver loads + */ +void intel_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) + ilk_init_pch_refclk(dev_priv); + else if (HAS_PCH_LPT(dev_priv)) + lpt_init_pch_refclk(dev_priv); +} diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h new file mode 100644 index 000000000000..12ab2c75a800 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h @@ -0,0 +1,21 @@ +/* 
SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_PCH_REFCLK_H_ +#define _INTEL_PCH_REFCLK_H_ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_crtc_state; + +void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); +void lpt_disable_iclkip(struct drm_i915_private *dev_priv); +int lpt_get_iclkip(struct drm_i915_private *dev_priv); + +void intel_init_pch_refclk(struct drm_i915_private *dev_priv); +void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c new file mode 100644 index 000000000000..01ce1d72297f --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_atomic_plane.h" +#include "intel_display.h" +#include "intel_display_types.h" +#include "intel_fb.h" +#include "intel_plane_initial.h" + +static bool +intel_reuse_initial_plane_obj(struct drm_i915_private *i915, + const struct intel_initial_plane_config *plane_config, + struct drm_framebuffer **fb, + struct i915_vma **vma) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + if (!crtc_state->uapi.active) + continue; + + if (!plane_state->ggtt_vma) + continue; + + if (intel_plane_ggtt_offset(plane_state) == plane_config->base) { + *fb = plane_state->hw.fb; + *vma = plane_state->ggtt_vma; + return true; + } + } + + return false; +} + +static struct i915_vma * +initial_plane_vma(struct drm_i915_private *i915, + struct intel_initial_plane_config *plane_config) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 base, size; + + if (plane_config->size == 0) + return NULL; + + base = round_down(plane_config->base, + I915_GTT_MIN_ALIGNMENT); + size = round_up(plane_config->base + plane_config->size, + I915_GTT_MIN_ALIGNMENT); + size -= base; + + /* + * If the FB is too big, just don't use it since fbdev is not very + * important and we should probably use that space with FBC or other + * features. + */ + if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) && + size * 2 > i915->stolen_usable_size) + return NULL; + + obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size); + if (IS_ERR(obj)) + return NULL; + + /* + * Mark it WT ahead of time to avoid changing the + * cache_level during fbdev initialization. The + * unbind there would get stuck waiting for rcu. + */ + i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ? 
+ I915_CACHE_WT : I915_CACHE_NONE); + + switch (plane_config->tiling) { + case I915_TILING_NONE: + break; + case I915_TILING_X: + case I915_TILING_Y: + obj->tiling_and_stride = + plane_config->fb->base.pitches[0] | + plane_config->tiling; + break; + default: + MISSING_CASE(plane_config->tiling); + goto err_obj; + } + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) + goto err_obj; + + if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base)) + goto err_obj; + + if (i915_gem_object_is_tiled(obj) && + !i915_vma_is_map_and_fenceable(vma)) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return NULL; +} + +static bool +intel_alloc_initial_plane_obj(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_mode_fb_cmd2 mode_cmd = { 0 }; + struct drm_framebuffer *fb = &plane_config->fb->base; + struct i915_vma *vma; + + switch (fb->modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + break; + default: + drm_dbg(&dev_priv->drm, + "Unsupported modifier for initial FB: 0x%llx\n", + fb->modifier); + return false; + } + + vma = initial_plane_vma(dev_priv, plane_config); + if (!vma) + return false; + + mode_cmd.pixel_format = fb->format->format; + mode_cmd.width = fb->width; + mode_cmd.height = fb->height; + mode_cmd.pitches[0] = fb->pitches[0]; + mode_cmd.modifier[0] = fb->modifier; + mode_cmd.flags = DRM_MODE_FB_MODIFIERS; + + if (intel_framebuffer_init(to_intel_framebuffer(fb), + vma->obj, &mode_cmd)) { + drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); + goto err_vma; + } + + plane_config->vma = vma; + return true; + +err_vma: + i915_vma_put(vma); + return false; +} + +static void +intel_find_initial_plane_obj(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane = + to_intel_plane(crtc->base.primary); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + struct drm_framebuffer *fb; + struct i915_vma *vma; + + /* + * TODO: + * Disable planes if get_initial_plane_config() failed. + * Make sure things work if the surface base is not page aligned. + */ + if (!plane_config->fb) + return; + + if (intel_alloc_initial_plane_obj(crtc, plane_config)) { + fb = &plane_config->fb->base; + vma = plane_config->vma; + goto valid_fb; + } + + /* + * Failed to alloc the obj, check to see if we should share + * an fb with another CRTC instead + */ + if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma)) + goto valid_fb; + + /* + * We've failed to reconstruct the BIOS FB. Current display state + * indicates that the primary plane is visible, but has a NULL FB, + * which will lead to problems later if we don't fix it up. The + * simplest solution is to just disable the primary plane now and + * pretend the BIOS never had it enabled. 
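+ * (When bigjoiner is active, the linked slave pipe's primary plane
+ * gets the same treatment just below.)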
+ */ + intel_plane_disable_noatomic(crtc, plane); + if (crtc_state->bigjoiner) { + struct intel_crtc *slave = + crtc_state->bigjoiner_linked_crtc; + intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary)); + } + + return; + +valid_fb: + plane_state->uapi.rotation = plane_config->rotation; + intel_fb_fill_view(to_intel_framebuffer(fb), + plane_state->uapi.rotation, &plane_state->view); + + __i915_vma_pin(vma); + plane_state->ggtt_vma = i915_vma_get(vma); + if (intel_plane_uses_fence(plane_state) && + i915_vma_pin_fence(vma) == 0 && vma->fence) + plane_state->flags |= PLANE_HAS_FENCE; + + plane_state->uapi.src_x = 0; + plane_state->uapi.src_y = 0; + plane_state->uapi.src_w = fb->width << 16; + plane_state->uapi.src_h = fb->height << 16; + + plane_state->uapi.crtc_x = 0; + plane_state->uapi.crtc_y = 0; + plane_state->uapi.crtc_w = fb->width; + plane_state->uapi.crtc_h = fb->height; + + if (plane_config->tiling) + dev_priv->preserve_bios_swizzle = true; + + plane_state->uapi.fb = fb; + drm_framebuffer_get(fb); + + plane_state->uapi.crtc = &crtc->base; + intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc); + + atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); +} + +static void plane_config_fini(struct intel_initial_plane_config *plane_config) +{ + if (plane_config->fb) { + struct drm_framebuffer *fb = &plane_config->fb->base; + + /* We may only have the stub and not a full framebuffer */ + if (drm_framebuffer_read_refcount(fb)) + drm_framebuffer_put(fb); + else + kfree(fb); + } + + if (plane_config->vma) + i915_vma_put(plane_config->vma); +} + +void intel_crtc_initial_plane_config(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_initial_plane_config plane_config = {}; + + /* + * Note that reserving the BIOS fb up front prevents us + * from stuffing other stolen allocations like the ring + * on top. This prevents some ugliness at boot time, and + * can even allow for smooth boot transitions if the BIOS + * fb is large enough for the active pipe configuration. + */ + dev_priv->display->get_initial_plane_config(crtc, &plane_config); + + /* + * If the fb is shared between multiple heads, we'll + * just get the first one. 
+ */ + intel_find_initial_plane_obj(crtc, &plane_config); + + plane_config_fini(&plane_config); +} diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h new file mode 100644 index 000000000000..c7e35ab3182b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_PLANE_INITIAL_H__ +#define __INTEL_PLANE_INITIAL_H__ + +struct intel_crtc; + +void intel_crtc_initial_plane_config(struct intel_crtc *crtc); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index a36ec4a818ff..e9c679bb1b2e 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -9,6 +9,7 @@ #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpll.h" +#include "intel_lvds.h" #include "intel_pps.h" static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, @@ -1408,3 +1409,61 @@ void intel_pps_setup(struct drm_i915_private *i915) else i915->pps_mmio_base = PPS_BASE; } + +void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + i915_reg_t pp_reg; + u32 val; + enum pipe panel_pipe = INVALID_PIPE; + bool locked = true; + + if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv))) + return; + + if (HAS_PCH_SPLIT(dev_priv)) { + u32 port_sel; + + pp_reg = PP_CONTROL(0); + port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; + + switch (port_sel) { + case PANEL_PORT_SELECT_LVDS: + intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe); + break; + case PANEL_PORT_SELECT_DPA: + g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe); + break; + case PANEL_PORT_SELECT_DPC: + g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe); + break; + case PANEL_PORT_SELECT_DPD: + g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe); + break; + default: + MISSING_CASE(port_sel); + break; + } + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + /* presumably write lock depends on pipe, not port select */ + pp_reg = PP_CONTROL(pipe); + panel_pipe = pipe; + } else { + u32 port_sel; + + pp_reg = PP_CONTROL(0); + port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; + + drm_WARN_ON(&dev_priv->drm, + port_sel != PANEL_PORT_SELECT_LVDS); + intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); + } + + val = intel_de_read(dev_priv, pp_reg); + if (!(val & PANEL_POWER_ON) || + ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) + locked = false; + + I915_STATE_WARN(panel_pipe == pipe && locked, + "panel assertion failure, pipe %c regs locked\n", + pipe_name(pipe)); +} diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h index fbbcca782e7b..fbb47f6f453e 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.h +++ b/drivers/gpu/drm/i915/display/intel_pps.h @@ -10,6 +10,7 @@ #include "intel_wakeref.h" +enum pipe; struct drm_i915_private; struct intel_connector; struct intel_crtc_state; @@ -49,4 +50,6 @@ void vlv_pps_init(struct intel_encoder *encoder, void intel_pps_unlock_regs_wa(struct drm_i915_private *i915); void intel_pps_setup(struct drm_i915_private *i915); +void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe); + #endif /* __INTEL_PPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 1b0daf649e82..a1a663f362e7 100644 
--- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -22,18 +22,19 @@ */ #include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> #include "display/intel_dp.h" #include "i915_drv.h" #include "intel_atomic.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_hdmi.h" #include "intel_psr.h" #include "intel_snps_phy.h" -#include "intel_sprite.h" #include "skl_universal_plane.h" /** @@ -364,41 +365,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) } } -static void hsw_psr_setup_aux(struct intel_dp *intel_dp) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 aux_clock_divider, aux_ctl; - int i; - static const u8 aux_msg[] = { - [0] = DP_AUX_NATIVE_WRITE << 4, - [1] = DP_SET_POWER >> 8, - [2] = DP_SET_POWER & 0xff, - [3] = 1 - 1, - [4] = DP_SET_POWER_D0, - }; - u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK | - EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK | - EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK | - EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK; - - BUILD_BUG_ON(sizeof(aux_msg) > 20); - for (i = 0; i < sizeof(aux_msg); i += 4) - intel_de_write(dev_priv, - EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2), - intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); - - aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); - - /* Start with bits set for DDI_AUX_CTL register */ - aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), - aux_clock_divider); - - /* Select only valid bits for SRD_AUX_CTL */ - aux_ctl &= psr_aux_mask; - intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder), - aux_ctl); -} - static void intel_psr_enable_sink(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -460,7 +426,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) val |= EDP_PSR_TP2_TP3_TIME_2500us; check_tp3_sel: - if (intel_dp_source_supports_hbr2(intel_dp) && + if (intel_dp_source_supports_tps3(dev_priv) && drm_dp_tps3_supported(intel_dp->dpcd)) val |= EDP_PSR_TP1_TP3_SEL; else @@ -545,7 +511,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12) val |= EDP_Y_COORDINATE_ENABLE; - val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1); + val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); val |= intel_psr2_get_tp_time(intel_dp); /* Wa_22012278275:adl-p */ @@ -595,15 +561,16 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_SU_SDP_SCANLINE; if (intel_dp->psr.psr2_sel_fetch_enabled) { + u32 tmp; + /* Wa_1408330847 */ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, DIS_RAM_BYPASS_PSR2_MAN_TRACK, DIS_RAM_BYPASS_PSR2_MAN_TRACK); - intel_de_write(dev_priv, - PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), - PSR2_MAN_TRK_CTL_ENABLE); + tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder)); + drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0); @@ -621,8 +588,8 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) static bool transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) { - if (DISPLAY_VER(dev_priv) < 9) - return false; + if (IS_ALDERLAKE_P(dev_priv)) + return trans == TRANSCODER_A || trans == TRANSCODER_B; else if (DISPLAY_VER(dev_priv) >= 12) 
return trans == TRANSCODER_A; else @@ -755,11 +722,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { - struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_plane_state *plane_state; - struct intel_plane *plane; - int i; if (!dev_priv->params.enable_psr2_sel_fetch && intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { @@ -774,14 +737,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, return false; } - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 sel fetch not enabled, plane rotated\n"); - return false; - } - } - /* Wa_14010254185 Wa_14010103792 */ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { drm_dbg_kms(&dev_priv->drm, @@ -877,12 +832,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - /* - * We are missing the implementation of some workarounds to enabled PSR2 - * in Alderlake_P, until ready PSR2 should be kept disabled. - */ - if (IS_ALDERLAKE_P(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n"); + if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) { + drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n"); return false; } @@ -985,7 +936,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, } void intel_psr_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state) + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); const struct drm_display_mode *adjusted_mode = @@ -1037,7 +989,10 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, crtc_state->has_psr = true; crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); + crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); + intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, + &crtc_state->psr_vsc); } void intel_psr_get_config(struct intel_encoder *encoder, @@ -1114,12 +1069,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp) enum transcoder cpu_transcoder = intel_dp->psr.transcoder; u32 mask; - /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ - * use hardcoded values PSR AUX transactions - */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - hsw_psr_setup_aux(intel_dp); - if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) { i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); u32 chicken = intel_de_read(dev_priv, reg); @@ -1130,6 +1079,16 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp) } /* + * Wa_16014451276:adlp + * All supported adlp panels have 1-based X granularity, this may + * cause issues if non-supported panels are used. 
+ */ + if (IS_ALDERLAKE_P(dev_priv) && + intel_dp->psr.psr2_enabled) + intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, + ADLP_1_BASED_X_GRANULARITY); + + /* * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also * mask LPSP to avoid dependency on other drivers that might block * runtime_pm besides preventing other hw tracking issues now we @@ -1174,6 +1133,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp) TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), TRANS_SET_CONTEXT_LATENCY_MASK, TRANS_SET_CONTEXT_LATENCY_VALUE(1)); + + /* Wa_16012604467:adlp */ + if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled) + intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0, + CLKGATE_DIS_MISC_DMASC_GATING_DIS); } static bool psr_interrupt_error_check(struct intel_dp *intel_dp) @@ -1208,8 +1172,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp) } static void intel_psr_enable_locked(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -1236,9 +1199,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", intel_dp->psr.psr2_enabled ? "2" : "1"); - intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state, - &intel_dp->psr.vsc); - intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc); + intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc); intel_snps_phy_update_psr_power_state(dev_priv, phy, true); intel_psr_enable_sink(intel_dp); intel_psr_enable_source(intel_dp); @@ -1248,33 +1209,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, intel_psr_activate(intel_dp); } -/** - * intel_psr_enable - Enable PSR - * @intel_dp: Intel DP - * @crtc_state: new CRTC state - * @conn_state: new CONNECTOR state - * - * This function can only be called after the pipe is fully trained and enabled. 
- */
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!CAN_PSR(intel_dp))
- return;
-
- if (!crtc_state->has_psr)
- return;
-
- drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
-
- mutex_lock(&intel_dp->psr.lock);
- intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
- mutex_unlock(&intel_dp->psr.lock);
-}
-
static void intel_psr_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1363,6 +1297,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
TRANS_SET_CONTEXT_LATENCY_MASK, 0);
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
/* Disable PSR on Sink */
@@ -1409,6 +1348,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp))
@@ -1421,6 +1361,9 @@ void intel_psr_pause(struct intel_dp *intel_dp)
return;
}
+ /* If we ever hit this, we will need to add refcount to pause/resume */
+ drm_WARN_ON(&dev_priv->drm, psr->paused);
+
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
psr->paused = true;
@@ -1456,27 +1399,48 @@ unlock:
mutex_unlock(&psr->lock);
}
+static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
+ PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+}
+
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 9)
- /*
- * Display WA #0884: skl+
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
- else
- /*
- * A write to CURSURFLIVE do not cause HW tracking to exit PSR
- * on older gens so doing the manual exit instead.
- */
- intel_psr_exit(intel_dp);
+ if (intel_dp->psr.psr2_sel_fetch_enabled)
+ intel_de_rmw(dev_priv,
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0,
+ man_trk_ctl_single_full_frame_bit_get(dev_priv));
+
+ /*
+ * Display WA #0884: skl+
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the current active
+ * pipe.
+ *
+ * This workaround does not exist for platforms with display 10 or
+ * newer, but testing proved that it works up to display 13; for newer
+ * than that, testing will be needed.
+ */ + intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); +} + +void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); } void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, @@ -1487,17 +1451,17 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; const struct drm_rect *clip; - u32 val, offset; - int ret, x, y; + u32 val; + int x, y; if (!crtc_state->enable_psr2_sel_fetch) return; - val = plane_state ? plane_state->ctl : 0; - val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE; - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val); - if (!val || plane->id == PLANE_CURSOR) + if (plane->id == PLANE_CURSOR) { + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), + plane_state->ctl); return; + } clip = &plane_state->psr2_sel_fetch_area; @@ -1505,14 +1469,19 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, val |= plane_state->uapi.dst.x1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); - /* TODO: consider auxiliary surfaces */ - x = plane_state->uapi.src.x1 >> 16; - y = (plane_state->uapi.src.y1 >> 16) + clip->y1; - ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); - if (ret) - drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n", - ret); + x = plane_state->view.color_plane[color_plane].x; + + /* + * From Bspec: UV surface Start Y Position = half of Y plane Y + * start position. + */ + if (!color_plane) + y = plane_state->view.color_plane[color_plane].y + clip->y1; + else + y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2; + val = y << 16 | x; + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), val); @@ -1520,14 +1489,16 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); + + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), + PLANE_SEL_FETCH_CTL_ENABLE); } void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - if (!HAS_PSR2_SEL_FETCH(dev_priv) || - !crtc_state->enable_psr2_sel_fetch) + if (!crtc_state->enable_psr2_sel_fetch) return; intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder), @@ -1542,11 +1513,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, u32 val = PSR2_MAN_TRK_CTL_ENABLE; if (full_update) { - if (IS_ALDERLAKE_P(dev_priv)) - val |= ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; - else - val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME; - + /* + * Not applying Wa_14014971508:adlp as we do not support the + * feature that requires this workaround. 
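+ *
+ * In this full_update branch the single-full-frame bit, chosen per
+ * platform by man_trk_ctl_single_full_frame_bit_get(), is ORed into
+ * val before jumping to exit; no SU region is programmed.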
+ */
+ val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
+ goto exit;
}
@@ -1555,7 +1526,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (IS_ALDERLAKE_P(dev_priv)) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
- val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
} else {
drm_WARN_ON(crtc_state->uapi.crtc->dev,
clip->y1 % 4 || clip->y2 % 4);
@@ -1597,6 +1568,41 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
}
+/*
+ * TODO: Not clear how to handle planes with negative position,
+ * also planes are not updated if they have a negative X
+ * position, so for now we do a full update in these cases.
+ *
+ * Plane scaling and rotation are not supported by selective fetch, and both
+ * properties can change without a modeset, so they need to be checked at every
+ * atomic commit.
+ */
+static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
+{
+ if (plane_state->uapi.dst.y1 < 0 ||
+ plane_state->uapi.dst.x1 < 0 ||
+ plane_state->scaler_id >= 0 ||
+ plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Check for pipe properties that are not supported by selective fetch.
+ *
+ * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
+ * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
+ * enabled and going to the full update path.
+ */
+static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->scaler_state.scaler_id >= 0)
+ return false;
+
+ return true;
+}
+
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1610,9 +1616,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (!crtc_state->enable_psr2_sel_fetch)
return 0;
- ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
- if (ret)
- return ret;
+ if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
+ full_update = true;
+ goto skip_sel_fetch_set_loop;
+ }
/*
* Calculate minimal selective fetch area of each plane and calculate
@@ -1623,8 +1630,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct drm_rect src, damaged_area = { .y1 = -1 };
- struct drm_mode_rect *damaged_clips;
- u32 num_clips, j;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
@@ -1633,19 +1640,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
!old_plane_state->uapi.visible)
continue;
- /*
- * TODO: Not clear how to handle planes with negative position,
- * also planes are not updated if they have a negative X
- * position so for now doing a full update in this cases
- */
- if (new_plane_state->uapi.dst.y1 < 0 ||
- new_plane_state->uapi.dst.x1 < 0) {
+ if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
full_update = true;
break;
}
- num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
-
/*
* If visibility or plane moved, mark the whole plane area as
* damaged as it needs to be complete redraw in the new and old
@@ -1666,14 +1665,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
clip_area_update(&pipe_clip, &damaged_area);
}
continue;
-
} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha || - (!num_clips && - new_plane_state->uapi.fb != old_plane_state->uapi.fb)) { - /* - * If the plane don't have damaged areas but the - * framebuffer changed or alpha changed, mark the whole - * plane area as damaged. - */ + } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) { + /* If alpha changed mark the whole plane area as damaged */ damaged_area.y1 = new_plane_state->uapi.dst.y1; damaged_area.y2 = new_plane_state->uapi.dst.y2; clip_area_update(&pipe_clip, &damaged_area); @@ -1681,15 +1674,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, } drm_rect_fp_to_int(&src, &new_plane_state->uapi.src); - damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi); - for (j = 0; j < num_clips; j++) { - struct drm_rect clip; - - clip.x1 = damaged_clips[j].x1; - clip.y1 = damaged_clips[j].y1; - clip.x2 = damaged_clips[j].x2; - clip.y2 = damaged_clips[j].y2; + drm_atomic_helper_damage_iter_init(&iter, + &old_plane_state->uapi, + &new_plane_state->uapi); + drm_atomic_for_each_plane_damage(&iter, &clip) { if (drm_rect_intersect(&clip, &src)) clip_area_update(&damaged_area, &clip); } @@ -1705,6 +1694,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (full_update) goto skip_sel_fetch_set_loop; + ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); + if (ret) + return ret; + intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip); /* @@ -1714,6 +1707,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_rect *sel_fetch_area, inter; + struct intel_plane *linked = new_plane_state->planar_linked_plane; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc || !new_plane_state->uapi.visible) @@ -1723,10 +1717,33 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) continue; + if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) { + full_update = true; + break; + } + sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; crtc_state->update_planes |= BIT(plane->id); + + /* + * Sel_fetch_area is calculated for UV plane. Use + * same area for Y plane as well. + */ + if (linked) { + struct intel_plane_state *linked_new_plane_state; + struct drm_rect *linked_sel_fetch_area; + + linked_new_plane_state = intel_atomic_get_plane_state(state, linked); + if (IS_ERR(linked_new_plane_state)) + return PTR_ERR(linked_new_plane_state); + + linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area; + linked_sel_fetch_area->y1 = sel_fetch_area->y1; + linked_sel_fetch_area->y2 = sel_fetch_area->y2; + crtc_state->update_planes |= BIT(linked->id); + } } skip_sel_fetch_set_loop: @@ -1734,69 +1751,102 @@ skip_sel_fetch_set_loop: return 0; } -/** - * intel_psr_update - Update PSR state - * @intel_dp: Intel DP - * @crtc_state: new CRTC state - * @conn_state: new CONNECTOR state - * - * This functions will update PSR states, disabling, enabling or switching PSR - * version when executing fastsets. For full modeset, intel_psr_disable() and - * intel_psr_enable() should be called instead. 
- */
-void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+void intel_psr_pre_plane_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_psr *psr = &intel_dp->psr;
- bool enable, psr2_enable;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
- if (!CAN_PSR(intel_dp))
+ if (!HAS_PSR(i915))
return;
- mutex_lock(&intel_dp->psr.lock);
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+ bool needs_to_disable = false;
+
+ mutex_lock(&psr->lock);
+
+ /*
+ * Reasons to disable:
+ * - PSR disabled in new state
+ * - All planes will go inactive
+ * - Changing between PSR versions
+ */
+ needs_to_disable |= intel_crtc_needs_modeset(crtc_state);
+ needs_to_disable |= !crtc_state->has_psr;
+ needs_to_disable |= !crtc_state->active_planes;
+ needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
+
+ if (psr->enabled && needs_to_disable)
+ intel_psr_disable_locked(intel_dp);
+
+ mutex_unlock(&psr->lock);
+ }
+}
+
+static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+
+ mutex_lock(&psr->lock);
- enable = crtc_state->has_psr;
- psr2_enable = crtc_state->has_psr2;
+ drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
+
+ /* Only enable if there are active planes */
+ if (!psr->enabled && crtc_state->active_planes)
+ intel_psr_enable_locked(intel_dp, crtc_state);
- if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
- crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
psr_force_hw_tracking_exit(intel_dp);
- else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
- /*
- * Activate PSR again after a force exit when enabling
- * CRC in older gens
- */
- if (!intel_dp->psr.active &&
- !intel_dp->psr.busy_frontbuffer_bits)
- schedule_work(&intel_dp->psr.work);
- }
- goto unlock;
+ mutex_unlock(&psr->lock);
}
+}
- if (psr->enabled)
- intel_psr_disable_locked(intel_dp);
+void intel_psr_post_plane_update(const struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
- if (enable)
- intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+ if (!HAS_PSR(dev_priv))
+ return;
-unlock:
- mutex_unlock(&intel_dp->psr.lock);
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
+ _intel_psr_post_plane_update(state, crtc_state);
}
-/**
- * psr_wait_for_idle - wait for PSR1 to idle
- * @intel_dp: Intel DP
- * @out_value: PSR status in case of failure
- *
- * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
- *
- */
-static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
+static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ /*
+ * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
+ * As all higher states have bit 4 of PSR2 state set, we can just wait for
+ * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
+ */
+ return intel_de_wait_for_clear(dev_priv,
+ EDP_PSR2_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
+}
+
+static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1806,15 +1856,13 @@ static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(&dev_priv->uncore,
- EDP_PSR_STATUS(intel_dp->psr.transcoder),
- EDP_PSR_STATUS_STATE_MASK,
- EDP_PSR_STATUS_STATE_IDLE, 2, 50,
- out_value);
+ return intel_de_wait_for_clear(dev_priv,
+ EDP_PSR_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR_STATUS_STATE_MASK, 50);
}
/**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * intel_psr_wait_for_idle - wait for PSR to be ready for a pipe update
* @new_crtc_state: new CRTC state
*
* This function is expected to be called from pipe_update_start() where it is
@@ -1831,19 +1879,23 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- u32 psr_status;
+ int ret;
mutex_lock(&intel_dp->psr.lock);
- if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) {
+
+ if (!intel_dp->psr.enabled) {
mutex_unlock(&intel_dp->psr.lock);
continue;
}
- /* when the PSR1 is enabled */
- if (psr_wait_for_idle(intel_dp, &psr_status))
- drm_err(&dev_priv->drm,
- "PSR idle timed out 0x%x, atomic update may fail\n",
- psr_status);
+ if (intel_dp->psr.psr2_enabled)
+ ret = _psr2_ready_for_pipe_update_locked(intel_dp);
+ else
+ ret = _psr1_ready_for_pipe_update_locked(intel_dp);
+
+ if (ret)
+ drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
+
mutex_unlock(&intel_dp->psr.lock);
}
}
@@ -2065,20 +2117,16 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
/*
* When we will be completely rely on PSR2 S/W tracking in future,
* intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
- * event also therefore tgl_dc3co_flush() require to be changed
+ * event also, therefore tgl_dc3co_flush_locked() needs to be changed
accordingly in future.
*/ static void -tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, - enum fb_op_origin origin) +tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, + enum fb_op_origin origin) { - mutex_lock(&intel_dp->psr.lock); - - if (!intel_dp->psr.dc3co_exitline) - goto unlock; - - if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active) - goto unlock; + if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled || + !intel_dp->psr.active) + return; /* * At every frontbuffer flush flip event modified delay of delayed work, @@ -2086,14 +2134,11 @@ tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, */ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe))) - goto unlock; + return; tgl_psr2_enable_dc3co(intel_dp); mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work, intel_dp->psr.dc3co_exit_delay); - -unlock: - mutex_unlock(&intel_dp->psr.lock); } /** @@ -2118,11 +2163,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned int pipe_frontbuffer_bits = frontbuffer_bits; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (origin == ORIGIN_FLIP) { - tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin); - continue; - } - mutex_lock(&intel_dp->psr.lock); if (!intel_dp->psr.enabled) { mutex_unlock(&intel_dp->psr.lock); @@ -2143,6 +2183,14 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, continue; } + if (origin == ORIGIN_FLIP || + (origin == ORIGIN_CURSOR_UPDATE && + !intel_dp->psr.psr2_sel_fetch_enabled)) { + tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin); + mutex_unlock(&intel_dp->psr.lock); + continue; + } + /* By definition flush = invalidate + flush */ if (pipe_frontbuffer_bits) psr_force_hw_tracking_exit(intel_dp); @@ -2186,23 +2234,12 @@ void intel_psr_init(struct intel_dp *intel_dp) intel_dp->psr.source_support = true; - if (IS_HASWELL(dev_priv)) - /* - * HSW don't have PSR registers on the same space as transcoder - * so set this to a value that when subtract to the register - * in transcoder space results in the right offset for HSW - */ - dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; - if (dev_priv->params.enable_psr == -1) - if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable) + if (!dev_priv->vbt.psr.enable) dev_priv->params.enable_psr = 0; /* Set link_standby x link_off defaults */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - /* HSW and BDW require workarounds that we don't implement. 
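/*
 * A generic sketch of the "_locked" convention applied in the
 * tgl_dc3co_flush() -> tgl_dc3co_flush_locked() conversion earlier in
 * this hunk: the caller takes the mutex once, and the helper merely
 * asserts that it is held instead of locking and unlocking around every
 * call site. All names here are hypothetical.
 */
struct example_dev {
	struct mutex lock;
};

static void example_flush_locked(struct example_dev *ed)
{
	lockdep_assert_held(&ed->lock);
	/* ... work that relies on ed->lock being held ... */
}

static void example_flush(struct example_dev *ed)
{
	mutex_lock(&ed->lock);
	example_flush_locked(ed);
	mutex_unlock(&ed->lock);
}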
*/ - intel_dp->psr.link_standby = false; - else if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(dev_priv) < 12) /* For new platforms up to TGL let's respect VBT back again */ intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link; diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 641521b101c8..f6526d9ccfdc 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -6,28 +6,26 @@ #ifndef __INTEL_PSR_H__ #define __INTEL_PSR_H__ -#include "intel_frontbuffer.h" +#include <linux/types.h> +enum fb_op_origin; struct drm_connector; struct drm_connector_state; struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc; struct intel_crtc_state; struct intel_dp; -struct intel_crtc; -struct intel_atomic_state; -struct intel_plane_state; -struct intel_plane; struct intel_encoder; +struct intel_plane; +struct intel_plane_state; void intel_psr_init_dpcd(struct intel_dp *intel_dp); -void intel_psr_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); +void intel_psr_pre_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_psr_post_plane_update(const struct intel_atomic_state *state); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); -void intel_psr_update(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value); void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, @@ -37,7 +35,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, enum fb_op_origin origin); void intel_psr_init(struct intel_dp *intel_dp); void intel_psr_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state); + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state); void intel_psr_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir); @@ -51,6 +50,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, int color_plane); +void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); void intel_psr_pause(struct intel_dp *intel_dp); void intel_psr_resume(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 8a52b7a16774..c8488f5ebd04 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -5,6 +5,7 @@ #include <linux/dmi.h> +#include "i915_drv.h" #include "intel_display_types.h" #include "intel_quirks.h" diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 6cb27599ea03..76e1188b01d4 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1335,6 +1335,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, adjusted_mode); pipe_config->sdvo_tv_clock = true; } else if (IS_LVDS(intel_sdvo_connector)) { + int ret; + + ret = intel_panel_compute_config(&intel_sdvo_connector->base, + adjusted_mode); + if (ret) + return ret; + if 
(!intel_sdvo_set_output_timings_from_mode(intel_sdvo, intel_sdvo_connector->base.panel.fixed_mode)) return -EINVAL; @@ -1835,7 +1842,7 @@ static void intel_enable_sdvo(struct intel_atomic_state *state, intel_sdvo_write_sdvox(intel_sdvo, temp); for (i = 0; i < 2; i++) - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); /* @@ -1873,7 +1880,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - if (clock > max_dotclk) return MODE_CLOCK_HIGH; @@ -1890,14 +1896,11 @@ intel_sdvo_mode_valid(struct drm_connector *connector, return MODE_CLOCK_HIGH; if (IS_LVDS(intel_sdvo_connector)) { - const struct drm_display_mode *fixed_mode = - intel_sdvo_connector->base.panel.fixed_mode; - - if (mode->hdisplay > fixed_mode->hdisplay) - return MODE_PANEL; + enum drm_mode_status status; - if (mode->vdisplay > fixed_mode->vdisplay) - return MODE_PANEL; + status = intel_panel_mode_valid(&intel_sdvo_connector->base, mode); + if (status != MODE_OK) + return status; } return MODE_OK; diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 18b52b64af95..09f405e4d363 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -5,6 +5,8 @@ #include <linux/util_macros.h> +#include "intel_ddi.h" +#include "intel_ddi_buf_trans.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_snps_phy.h" @@ -50,58 +52,28 @@ void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv, SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val); } -static const u32 dg2_ddi_translations[] = { - /* VS 0, pre-emph 0 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 26), - - /* VS 0, pre-emph 1 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 33) | - REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 6), - - /* VS 0, pre-emph 2 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 38) | - REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 12), - - /* VS 0, pre-emph 3 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 43) | - REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 19), - - /* VS 1, pre-emph 0 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 39), - - /* VS 1, pre-emph 1 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 44) | - REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 8), - - /* VS 1, pre-emph 2 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 47) | - REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 15), - - /* VS 2, pre-emph 0 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 52), - - /* VS 2, pre-emph 1 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 51) | - REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 10), - - /* VS 3, pre-emph 0 */ - REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 62), -}; - -void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - u32 level) +void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int n_entries, ln; - n_entries = ARRAY_SIZE(dg2_ddi_translations); - if (level >= n_entries) - level = n_entries - 1; + trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); + if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) + return; + + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + u32 val = 0; + + val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing); + val |= 
REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor); + val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor); - for (ln = 0; ln < 4; ln++) - intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), - dg2_ddi_translations[level]); + intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val); + } } /* @@ -198,11 +170,82 @@ static const struct intel_mpllb_state dg2_dp_hbr3_100 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), }; -static const struct intel_mpllb_state *dg2_dp_100_tables[] = { +static const struct intel_mpllb_state dg2_dp_uhbr10_100 = { + .clock = 1000000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 21) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 368), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), + + /* + * SSC will be enabled, DP UHBR has a minimum SSC requirement. + */ + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 58982), + .mpllb_sscstep = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 76101), +}; + +static const struct intel_mpllb_state dg2_dp_uhbr13_100 = { + .clock = 1350000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 45) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 508), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), + + /* + * SSC will be enabled, DP UHBR has a minimum SSC requirement. 
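/*
 * The per-lane programming in intel_snps_phy_set_signal_levels() above,
 * restated as a sketch: the flat dg2_ddi_translations[] table is gone,
 * the level is resolved per lane from the shared buf-trans tables, and
 * the three equalization values are packed into one register word.
 * example_snps_tx_eq() is hypothetical.
 */
static u32 example_snps_tx_eq(const struct intel_ddi_buf_trans *trans, int level)
{
	return REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing) |
	       REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor) |
	       REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor);
}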
+ */ + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 79626), + .mpllb_sscstep = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 102737), +}; + +static const struct intel_mpllb_state * const dg2_dp_100_tables[] = { &dg2_dp_rbr_100, &dg2_dp_hbr1_100, &dg2_dp_hbr2_100, &dg2_dp_hbr3_100, + &dg2_dp_uhbr10_100, + &dg2_dp_uhbr13_100, NULL, }; @@ -311,11 +354,89 @@ static const struct intel_mpllb_state dg2_dp_hbr3_38_4 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 61440), }; -static const struct intel_mpllb_state *dg2_dp_38_4_tables[] = { +static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = { + .clock = 1000000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 488), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 3), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 27306), + + /* + * SSC will be enabled, DP UHBR has a minimum SSC requirement. + */ + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 76800), + .mpllb_sscstep = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 129024), +}; + +static const struct intel_mpllb_state dg2_dp_uhbr13_38_4 = { + .clock = 1350000, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 56) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 670), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36864), + + /* + * SSC will be enabled, DP UHBR has a minimum SSC requirement. 
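/*
 * A sketch of how one of these NULL-terminated MPLLB tables would be
 * consumed: walk the list and take the first entry whose .clock matches
 * the requested link rate in kHz, the same walk pattern
 * intel_snps_phy_check_hdmi_link_rate() uses further down.
 * example_mpllb_lookup() is hypothetical.
 */
static const struct intel_mpllb_state *
example_mpllb_lookup(const struct intel_mpllb_state * const *tables, int clock)
{
	int i;

	for (i = 0; tables[i]; i++) {
		if (tables[i]->clock == clock)
			return tables[i];
	}

	return NULL;	/* no PLL configuration for this link rate */
}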
+ */ + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 103680), + .mpllb_sscstep = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 174182), +}; + +static const struct intel_mpllb_state * const dg2_dp_38_4_tables[] = { &dg2_dp_rbr_38_4, &dg2_dp_hbr1_38_4, &dg2_dp_hbr2_38_4, &dg2_dp_hbr3_38_4, + &dg2_dp_uhbr10_38_4, + &dg2_dp_uhbr13_38_4, NULL, }; @@ -448,7 +569,7 @@ static const struct intel_mpllb_state dg2_edp_r432 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752), }; -static const struct intel_mpllb_state *dg2_edp_tables[] = { +static const struct intel_mpllb_state * const dg2_edp_tables[] = { &dg2_dp_rbr_100, &dg2_edp_r216, &dg2_edp_r243, @@ -611,7 +732,7 @@ static const struct intel_mpllb_state dg2_hdmi_594 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; -static const struct intel_mpllb_state *dg2_hdmi_tables[] = { +static const struct intel_mpllb_state * const dg2_hdmi_tables[] = { &dg2_hdmi_25_175, &dg2_hdmi_27_0, &dg2_hdmi_74_25, @@ -620,7 +741,7 @@ static const struct intel_mpllb_state *dg2_hdmi_tables[] = { NULL, }; -static const struct intel_mpllb_state ** +static const struct intel_mpllb_state * const * intel_mpllb_tables_get(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { @@ -654,7 +775,7 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state, int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - const struct intel_mpllb_state **tables; + const struct intel_mpllb_state * const *tables; int i; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { @@ -850,7 +971,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder, int intel_snps_phy_check_hdmi_link_rate(int clock) { - const struct intel_mpllb_state **tables = dg2_hdmi_tables; + const struct intel_mpllb_state * const *tables = dg2_hdmi_tables; int i; for (i = 0; tables[i]; i++) { diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h index 6261ff88ef5c..11dcd6deb070 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.h +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h @@ -29,7 +29,7 @@ int intel_mpllb_calc_port_clock(struct intel_encoder *encoder, const struct intel_mpllb_state *pll_state); int intel_snps_phy_check_hdmi_link_rate(int clock); -void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder, - u32 level); +void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); #endif /* __INTEL_SNPS_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 08116f41da26..2357a1301f48 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -40,14 +40,15 @@ #include <drm/drm_rect.h> #include "i915_drv.h" -#include "i915_trace.h" #include "i915_vgpu.h" +#include "i9xx_plane.h" #include "intel_atomic_plane.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_frontbuffer.h" #include "intel_sprite.h" -#include "i9xx_plane.h" #include "intel_vrr.h" int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) @@ -118,7 +119,7 @@ static void i9xx_plane_linear_gamma(u16 gamma[8]) } static void -chv_update_csc(const struct intel_plane_state *plane_state) +chv_sprite_update_csc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = 
to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -190,7 +191,7 @@ chv_update_csc(const struct intel_plane_state *plane_state) #define COS_0 1 static void -vlv_update_clrc(const struct intel_plane_state *plane_state) +vlv_sprite_update_clrc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -393,7 +394,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } -static void vlv_update_gamma(const struct intel_plane_state *plane_state) +static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -417,45 +418,54 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state) } static void -vlv_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +vlv_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), + plane_state->view.color_plane[0].mapping_stride); + intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), + ((crtc_h - 1) << 16) | (crtc_w - 1)); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +vlv_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; + u32 sprctl, linear_offset; unsigned long irqflags; - u32 sprctl; sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); - /* Sizes are 0 based */ - crtc_w--; - crtc_h--; - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), - plane_state->view.color_plane[0].stride); - intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), - (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), - (crtc_h << 16) | crtc_w); - intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); - if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) - chv_update_csc(plane_state); + chv_sprite_update_csc(plane_state); if (key->flags) { intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id), @@ -466,6 +476,8 @@ vlv_update_plane(struct intel_plane *plane, 
key->max_value); } + intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x); @@ -478,15 +490,15 @@ vlv_update_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - vlv_update_clrc(plane_state); - vlv_update_gamma(plane_state); + vlv_sprite_update_clrc(plane_state); + vlv_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -vlv_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +vlv_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -502,8 +514,8 @@ vlv_disable_plane(struct intel_plane *plane, } static bool -vlv_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +vlv_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -805,7 +817,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state, i++; } -static void ivb_update_gamma(const struct intel_plane_state *plane_state) +static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -835,48 +847,56 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state) } static void -ivb_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +ivb_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u32 x = plane_state->view.color_plane[0].x; - u32 y = plane_state->view.color_plane[0].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u32 sprctl, sprscale = 0; + u32 sprscale = 0; unsigned long irqflags; - sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); - - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (crtc_w != src_w || crtc_h != src_h) - sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + sprscale = SPRITE_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, SPRSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); if (IS_IVYBRIDGE(dev_priv)) 
intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +ivb_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; + u32 x = plane_state->view.color_plane[0].x; + u32 y = plane_state->view.color_plane[0].y; + u32 sprctl, linear_offset; + unsigned long irqflags; + + sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (key->flags) { intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, SPRKEYMSK(pipe), @@ -902,14 +922,14 @@ ivb_update_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - ivb_update_gamma(plane_state); + ivb_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -ivb_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +ivb_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -927,8 +947,8 @@ ivb_disable_plane(struct intel_plane *plane, } static bool -ivb_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +ivb_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -1106,7 +1126,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, return dvscntr; } -static void g4x_update_gamma(const struct intel_plane_state *plane_state) +static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1136,7 +1156,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17]) gamma[i] = (i << 10) / 16; } -static void ilk_update_gamma(const struct intel_plane_state *plane_state) +static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1163,47 +1183,55 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state) } static void -g4x_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +g4x_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 dvssurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u32 x = plane_state->view.color_plane[0].x; - 
u32 y = plane_state->view.color_plane[0].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u32 dvscntr, dvsscale = 0; + u32 dvsscale = 0; unsigned long irqflags; - dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); - - /* Sizes are 0 based */ - src_w--; - src_h--; - crtc_w--; - crtc_h--; - if (crtc_w != src_w || crtc_h != src_h) - dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + dvsscale = DVS_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, DVSSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +g4x_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 dvssurf_offset = plane_state->view.color_plane[0].offset; + u32 x = plane_state->view.color_plane[0].x; + u32 y = plane_state->view.color_plane[0].y; + u32 dvscntr, linear_offset; + unsigned long irqflags; + + dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (key->flags) { intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, DVSKEYMSK(pipe), @@ -1224,16 +1252,16 @@ g4x_update_plane(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + dvssurf_offset); if (IS_G4X(dev_priv)) - g4x_update_gamma(plane_state); + g4x_sprite_update_gamma(plane_state); else - ilk_update_gamma(plane_state); + ilk_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -g4x_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +g4x_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1250,8 +1278,8 @@ g4x_disable_plane(struct intel_plane *plane, } static bool -g4x_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +g4x_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -1299,7 +1327,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, int src_x, src_w, src_h, crtc_w, crtc_h; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - unsigned int stride = plane_state->view.color_plane[0].stride; + unsigned int stride = plane_state->view.color_plane[0].mapping_stride; unsigned int cpp = fb->format->cpp[0]; unsigned int width_bytes; int min_width, min_height; @@ -1540,8 +1568,8 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, */ if (!ret && has_dst_key_in_primary_plane(dev_priv)) { 
struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, - to_intel_plane(plane)->pipe); + intel_crtc_for_pipe(dev_priv, + to_intel_plane(plane)->pipe); plane_state = drm_atomic_get_plane_state(state, crtc->base.primary); @@ -1567,7 +1595,7 @@ out: return ret; } -static const u32 g4x_plane_formats[] = { +static const u32 g4x_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, @@ -1575,13 +1603,7 @@ static const u32 g4x_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u64 i9xx_plane_format_modifiers[] = { - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u32 snb_plane_formats[] = { +static const u32 snb_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, @@ -1594,7 +1616,7 @@ static const u32 snb_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u32 vlv_plane_formats[] = { +static const u32 vlv_sprite_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1629,13 +1651,8 @@ static const u32 chv_pipe_b_sprite_formats[] = { static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: @@ -1655,13 +1672,8 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: @@ -1686,13 +1698,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -1762,9 +1769,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, return plane; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - plane->update_plane = vlv_update_plane; - plane->disable_plane = vlv_disable_plane; - plane->get_hw_state = vlv_plane_get_hw_state; + plane->update_noarm = vlv_sprite_update_noarm; + plane->update_arm = vlv_sprite_update_arm; + plane->disable_arm = vlv_sprite_disable_arm; + plane->get_hw_state = vlv_sprite_get_hw_state; plane->check_plane = vlv_sprite_check; plane->max_stride = i965_plane_max_stride; plane->min_cdclk = vlv_plane_min_cdclk; @@ -1773,16 +1781,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, formats = chv_pipe_b_sprite_formats; num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats); } else { - formats = vlv_plane_formats; - num_formats = ARRAY_SIZE(vlv_plane_formats); + formats = vlv_sprite_formats; + num_formats = ARRAY_SIZE(vlv_sprite_formats); } - modifiers = i9xx_plane_format_modifiers; plane_funcs = &vlv_sprite_funcs; } else if (DISPLAY_VER(dev_priv) >= 7) { - plane->update_plane = ivb_update_plane; - plane->disable_plane = ivb_disable_plane; - plane->get_hw_state = ivb_plane_get_hw_state; + plane->update_noarm = ivb_sprite_update_noarm; + plane->update_arm = 
ivb_sprite_update_arm; + plane->disable_arm = ivb_sprite_disable_arm; + plane->get_hw_state = ivb_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { @@ -1793,28 +1801,27 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->min_cdclk = ivb_sprite_min_cdclk; } - formats = snb_plane_formats; - num_formats = ARRAY_SIZE(snb_plane_formats); - modifiers = i9xx_plane_format_modifiers; + formats = snb_sprite_formats; + num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { - plane->update_plane = g4x_update_plane; - plane->disable_plane = g4x_disable_plane; - plane->get_hw_state = g4x_plane_get_hw_state; + plane->update_noarm = g4x_sprite_update_noarm; + plane->update_arm = g4x_sprite_update_arm; + plane->disable_arm = g4x_sprite_disable_arm; + plane->get_hw_state = g4x_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; plane->max_stride = g4x_sprite_max_stride; plane->min_cdclk = g4x_sprite_min_cdclk; - modifiers = i9xx_plane_format_modifiers; if (IS_SANDYBRIDGE(dev_priv)) { - formats = snb_plane_formats; - num_formats = ARRAY_SIZE(snb_plane_formats); + formats = snb_sprite_formats; + num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { - formats = g4x_plane_formats; - num_formats = ARRAY_SIZE(g4x_plane_formats); + formats = g4x_sprite_formats; + num_formats = ARRAY_SIZE(g4x_sprite_formats); plane_funcs = &g4x_sprite_funcs; } @@ -1833,11 +1840,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->id = PLANE_SPRITE0 + sprite; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, "sprite %c", sprite_name(pipe, sprite)); + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index c085eb87705c..4f63e4967731 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -27,14 +27,10 @@ struct intel_plane_state; #define VBLANK_EVASION_TIME_US 100 #endif -int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, - int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); -void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); int chv_plane_check_rotation(const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 3ffece568ed9..40faa18947c9 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -12,44 +12,81 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) { static const char * const names[] = { + [TC_PORT_DISCONNECTED] = "disconnected", [TC_PORT_TBT_ALT] = "tbt-alt", [TC_PORT_DP_ALT] = "dp-alt", [TC_PORT_LEGACY] = "legacy", }; if (WARN_ON(mode >= ARRAY_SIZE(names))) - mode = TC_PORT_TBT_ALT; + mode = TC_PORT_DISCONNECTED; return names[mode]; } +static bool intel_tc_port_in_mode(struct 
intel_digital_port *dig_port, + enum tc_port_mode mode) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode; +} + +bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port) +{ + return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT); +} + +bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port) +{ + return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT); +} + +bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port) +{ + return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY); +} + +bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + + return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) || + IS_ALDERLAKE_P(i915); +} + static enum intel_display_power_domain -tc_cold_get_power_domain(struct intel_digital_port *dig_port) +tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode) { - if (intel_tc_cold_requires_aux_pw(dig_port)) - return intel_legacy_aux_to_power_domain(dig_port->aux_ch); - else + if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port)) return POWER_DOMAIN_TC_COLD_OFF; + + return intel_legacy_aux_to_power_domain(dig_port->aux_ch); } static intel_wakeref_t -tc_cold_block(struct intel_digital_port *dig_port) +tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode, + enum intel_display_power_domain *domain) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum intel_display_power_domain domain; - if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port) - return 0; + *domain = tc_cold_get_power_domain(dig_port, mode); - domain = tc_cold_get_power_domain(dig_port); - return intel_display_power_get(i915, domain); + return intel_display_power_get(i915, *domain); +} + +static intel_wakeref_t +tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain) +{ + return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain); } static void -tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref) +tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain, + intel_wakeref_t wakeref) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum intel_display_power_domain domain; /* * wakeref == -1, means some error happened saving save_depot_stack but @@ -59,8 +96,7 @@ tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref) if (wakeref == 0) return; - domain = tc_cold_get_power_domain(dig_port); - intel_display_power_put_async(i915, domain, wakeref); + intel_display_power_put(i915, domain, wakeref); } static void @@ -69,11 +105,9 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port) struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); bool enabled; - if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port) - return; - enabled = intel_display_power_is_enabled(i915, - tc_cold_get_power_domain(dig_port)); + tc_cold_get_power_domain(dig_port, + dig_port->tc_mode)); drm_WARN_ON(&i915->drm, !enabled); } @@ -244,6 +278,11 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port) struct intel_uncore *uncore = &i915->uncore; u32 val, mask = 0; + /* + * On ADL-P HW/FW will wake from TCCOLD to complete the read access of + * registers in IOM. 
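/*
 * A usage sketch for the reworked blocking helpers above: tc_cold_block()
 * now reports which power domain it actually took (TC-cold-off or the
 * legacy AUX domain, depending on mode and platform) and the caller must
 * hand that same domain back to tc_cold_unblock().
 * example_read_live_status() is hypothetical.
 */
static u32 example_read_live_status(struct intel_digital_port *dig_port)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;
	u32 mask;

	wakeref = tc_cold_block(dig_port, &domain);
	mask = tc_port_live_status_mask(dig_port);	/* safe: TCCOLD is blocked */
	tc_cold_unblock(dig_port, domain, wakeref);

	return mask;
}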
Note that this doesn't apply to PHY and FIA + * registers. + */ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port)); if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT) mask |= BIT(TC_PORT_DP_ALT); @@ -270,6 +309,14 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) return icl_tc_port_live_status_mask(dig_port); } +/* + * Return the PHY status complete flag indicating that display can acquire the + * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink + * is connected and it's ready to switch the ownership to display. The flag + * will be left cleared when a TBT-alt sink is connected, where the PHY is + * owned by the TBT subsystem and so switching the ownership to display is not + * required. + */ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -288,6 +335,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx); } +/* + * Return the PHY status complete flag indicating that display can acquire the + * PHY ownership. The IOM firmware sets this flag when it's ready to switch + * the ownership to display, regardless of what sink is connected (TBT-alt, + * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT + * subsystem and so switching the ownership to display is not required. + */ static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -339,11 +393,6 @@ static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port, intel_uncore_write(uncore, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); - if (!take && wait_for(!tc_phy_status_complete(dig_port), 10)) - drm_dbg_kms(&i915->drm, - "Port %s: PHY complete clear timed out\n", - dig_port->tc_port_name); - return true; } @@ -429,6 +478,7 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port, int required_lanes) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + u32 live_status_mask; int max_lanes; if (!tc_phy_status_complete(dig_port)) { @@ -437,6 +487,13 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port, goto out_set_tbt_alt_mode; } + live_status_mask = tc_port_live_status_mask(dig_port); + if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY)))) { + drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n", + dig_port->tc_port_name, live_status_mask); + goto out_set_tbt_alt_mode; + } + if (!tc_phy_take_ownership(dig_port, true) && !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) goto out_set_tbt_alt_mode; @@ -485,14 +542,13 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) { switch (dig_port->tc_mode) { case TC_PORT_LEGACY: - /* Nothing to do, we never disconnect from legacy mode */ - break; case TC_PORT_DP_ALT: tc_phy_take_ownership(dig_port, false); - dig_port->tc_mode = TC_PORT_TBT_ALT; - break; + fallthrough; case TC_PORT_TBT_ALT: - /* Nothing to do, we stay in TBT-alt mode */ + dig_port->tc_mode = TC_PORT_DISCONNECTED; + fallthrough; + case TC_PORT_DISCONNECTED: break; default: MISSING_CASE(dig_port->tc_mode); @@ -509,6 +565,10 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) return dig_port->tc_mode == TC_PORT_TBT_ALT; } + /* On ADL-P the PHY complete flag is set in TBT mode as well. 
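/*
 * The target-mode selection used by intel_tc_port_get_target_mode()
 * below, restated: the live status is a bitmask indexed by enum
 * tc_port_mode and the highest set bit wins; with nothing live the port
 * now falls back to TBT-alt instead of inferring legacy mode from the
 * PHY status flags. example_target_mode() is hypothetical.
 */
static enum tc_port_mode example_target_mode(u32 live_status_mask)
{
	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return TC_PORT_TBT_ALT;
}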
*/ + if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT) + return true; + if (!tc_phy_is_owned(dig_port)) { drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n", dig_port->tc_port_name); @@ -550,9 +610,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) if (live_status_mask) return fls(live_status_mask) - 1; - return tc_phy_status_complete(dig_port) && - dig_port->tc_legacy_port ? TC_PORT_LEGACY : - TC_PORT_TBT_ALT; + return TC_PORT_TBT_ALT; } static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, @@ -581,6 +639,43 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, tc_port_mode_name(dig_port->tc_mode)); } +static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) +{ + return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode; +} + +static void intel_tc_port_update_mode(struct intel_digital_port *dig_port, + int required_lanes, bool force_disconnect) +{ + enum intel_display_power_domain domain; + intel_wakeref_t wref; + bool needs_reset = force_disconnect; + + if (!needs_reset) { + /* Get power domain required to check the hotplug live status. */ + wref = tc_cold_block(dig_port, &domain); + needs_reset = intel_tc_port_needs_reset(dig_port); + tc_cold_unblock(dig_port, domain, wref); + } + + if (!needs_reset) + return; + + /* Get power domain required for resetting the mode. */ + wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain); + + intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect); + + /* Get power domain matching the new mode after reset. */ + tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain, + fetch_and_zero(&dig_port->tc_lock_wakeref)); + if (dig_port->tc_mode != TC_PORT_DISCONNECTED) + dig_port->tc_lock_wakeref = tc_cold_block(dig_port, + &dig_port->tc_lock_power_domain); + + tc_cold_unblock(dig_port, domain, wref); +} + static void intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port, int refcount) @@ -595,45 +690,42 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_encoder *encoder = &dig_port->base; - intel_wakeref_t tc_cold_wref; int active_links = 0; mutex_lock(&dig_port->tc_lock); - tc_cold_wref = tc_cold_block(dig_port); - dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); if (dig_port->dp.is_mst) active_links = intel_dp_mst_encoder_active_links(dig_port); else if (encoder->base.crtc) active_links = to_intel_crtc(encoder->base.crtc)->active; + drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED); + drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); if (active_links) { + enum intel_display_power_domain domain; + intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain); + + dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + if (!icl_tc_phy_is_connected(dig_port)) drm_dbg_kms(&i915->drm, "Port %s: PHY disconnected with %d active link(s)\n", dig_port->tc_port_name, active_links); intel_tc_port_link_init_refcount(dig_port, active_links); - goto out; - } + dig_port->tc_lock_wakeref = tc_cold_block(dig_port, + &dig_port->tc_lock_power_domain); - if (dig_port->tc_legacy_port) - icl_tc_phy_connect(dig_port, 1); + tc_cold_unblock(dig_port, domain, tc_cold_wref); + } -out: drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); - tc_cold_unblock(dig_port, tc_cold_wref); mutex_unlock(&dig_port->tc_lock); } -static bool 
intel_tc_port_needs_reset(struct intel_digital_port *dig_port) -{ - return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode; -} - /* * The type-C ports are different because even when they are connected, they may * not be available/usable by the graphics driver: see the comment on @@ -648,78 +740,79 @@ bool intel_tc_port_connected(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); bool is_connected; - intel_wakeref_t tc_cold_wref; intel_tc_port_lock(dig_port); - tc_cold_wref = tc_cold_block(dig_port); is_connected = tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); - tc_cold_unblock(dig_port, tc_cold_wref); intel_tc_port_unlock(dig_port); return is_connected; } static void __intel_tc_port_lock(struct intel_digital_port *dig_port, - int required_lanes, bool force_disconnect) + int required_lanes) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - intel_wakeref_t wakeref; - - wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE); mutex_lock(&dig_port->tc_lock); - if (!dig_port->tc_link_refcount) { - intel_wakeref_t tc_cold_wref; - - tc_cold_wref = tc_cold_block(dig_port); + cancel_delayed_work(&dig_port->tc_disconnect_phy_work); - if (force_disconnect || intel_tc_port_needs_reset(dig_port)) - intel_tc_port_reset_mode(dig_port, required_lanes, - force_disconnect); - - tc_cold_unblock(dig_port, tc_cold_wref); - } + if (!dig_port->tc_link_refcount) + intel_tc_port_update_mode(dig_port, required_lanes, + false); - drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); - dig_port->tc_lock_wakeref = wakeref; + drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED); + drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT && + !tc_phy_is_owned(dig_port)); } void intel_tc_port_lock(struct intel_digital_port *dig_port) { - __intel_tc_port_lock(dig_port, 1, false); + __intel_tc_port_lock(dig_port, 1); } -void intel_tc_port_unlock(struct intel_digital_port *dig_port) +/** + * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port + * @dig_port: digital port + * + * Disconnect the given digital port from its TypeC PHY (handing back the + * control of the PHY to the TypeC subsystem). This will happen in a delayed + * manner after each AUX transaction and modeset disable. + */ +static void intel_tc_port_disconnect_phy_work(struct work_struct *work) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref); + struct intel_digital_port *dig_port = + container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work); - mutex_unlock(&dig_port->tc_lock); + mutex_lock(&dig_port->tc_lock); + + if (!dig_port->tc_link_refcount) + intel_tc_port_update_mode(dig_port, 1, true); - intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE, - wakeref); + mutex_unlock(&dig_port->tc_lock); } /** - * intel_tc_port_disconnect_phy: disconnect TypeC PHY from display port + * intel_tc_port_flush_work: flush the work disconnecting the PHY * @dig_port: digital port * - * Disconnect the given digital port from its TypeC PHY (handing back the - * control of the PHY to the TypeC subsystem). The only purpose of this - * function is to force the disconnect even with a TypeC display output still - * plugged to the TypeC connector, which is required by the TypeC firmwares - * during system suspend and shutdown.
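/*
 * A lifecycle sketch for the delayed-disconnect scheme introduced above:
 * taking the port lock cancels any pending disconnect work, and unlocking
 * an idle port re-arms it with a roughly one second delay, so
 * back-to-back AUX transfers and modesets reuse the connected PHY instead
 * of resetting it each time. example_aux_xfer() is hypothetical.
 */
static void example_aux_xfer(struct intel_digital_port *dig_port)
{
	intel_tc_port_lock(dig_port);	/* cancels tc_disconnect_phy_work */

	/* ... perform the AUX transfer over the connected PHY ... */

	intel_tc_port_unlock(dig_port);	/* re-arms the delayed disconnect */
}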
Otherwise - during the unplug event - * handling - the PHY ownership is released automatically by - * intel_tc_port_reset_mode(), when calling this function is not required. + * Flush the delayed work disconnecting an idle PHY. */ -void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port) +void intel_tc_port_flush_work(struct intel_digital_port *dig_port) { - __intel_tc_port_lock(dig_port, 1, true); - intel_tc_port_unlock(dig_port); + flush_delayed_work(&dig_port->tc_disconnect_phy_work); +} + +void intel_tc_port_unlock(struct intel_digital_port *dig_port) +{ + if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED) + queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work, + msecs_to_jiffies(1000)); + + mutex_unlock(&dig_port->tc_lock); } bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) @@ -731,21 +824,30 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes) { - __intel_tc_port_lock(dig_port, required_lanes, false); + __intel_tc_port_lock(dig_port, required_lanes); dig_port->tc_link_refcount++; intel_tc_port_unlock(dig_port); } void intel_tc_port_put_link(struct intel_digital_port *dig_port) { - mutex_lock(&dig_port->tc_lock); - dig_port->tc_link_refcount--; - mutex_unlock(&dig_port->tc_lock); + intel_tc_port_lock(dig_port); + --dig_port->tc_link_refcount; + intel_tc_port_unlock(dig_port); + + /* + * Disconnecting the PHY after the PHY's PLL gets disabled may + * hang the system on ADL-P, so disconnect the PHY here synchronously. + * TODO: remove this once the root cause of the ordering requirement + * is found/fixed. + */ + intel_tc_port_flush_work(dig_port); } static bool tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port) { + enum intel_display_power_domain domain; intel_wakeref_t wakeref; u32 val; @@ -753,9 +855,9 @@ tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig return false; mutex_lock(&dig_port->tc_lock); - wakeref = tc_cold_block(dig_port); + wakeref = tc_cold_block(dig_port, &domain); val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1)); - tc_cold_unblock(dig_port, wakeref); + tc_cold_unblock(dig_port, domain, wakeref); mutex_unlock(&dig_port->tc_lock); drm_WARN_ON(&i915->drm, val == 0xffffffff); @@ -795,15 +897,9 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) "%c/TC#%d", port_name(port), tc_port + 1); mutex_init(&dig_port->tc_lock); + INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work); dig_port->tc_legacy_port = is_legacy; + dig_port->tc_mode = TC_PORT_DISCONNECTED; dig_port->tc_link_refcount = 0; tc_port_load_fia_params(i915, dig_port); } - -bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - - return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) || - IS_ALDERLAKE_P(i915); -} diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 0c881f645e27..6b47b29f551c 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -12,8 +12,11 @@ struct intel_digital_port; struct intel_encoder; +bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port); +bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port); +bool intel_tc_port_in_legacy_mode(struct intel_digital_port 
*dig_port); + bool intel_tc_port_connected(struct intel_encoder *encoder); -void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); @@ -24,6 +27,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, void intel_tc_port_sanitize(struct intel_digital_port *dig_port); void intel_tc_port_lock(struct intel_digital_port *dig_port); void intel_tc_port_unlock(struct intel_digital_port *dig_port); +void intel_tc_port_flush_work(struct intel_digital_port *dig_port); void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes); void intel_tc_port_put_link(struct intel_digital_port *dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index d02f09f7e750..8a39989b87ad 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -36,6 +36,7 @@ #include "i915_drv.h" #include "intel_connector.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_hotplug.h" @@ -924,8 +925,7 @@ intel_enable_tv(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(dev); /* Prevents vblank waits from timing out in intel_tv_detect_type() */ - intel_wait_for_vblank(dev_priv, - to_intel_crtc(pipe_config->uapi.crtc)->pipe); + intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); intel_de_write(dev_priv, TV_CTL, intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE); @@ -1529,7 +1529,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state, intel_de_write(dev_priv, TV_CLR_LEVEL, ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); - assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); + assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder); /* Filter ctl must be set before TV_WIN_SIZE */ tv_filter_ctl = TV_AUTO_SCALE; @@ -1618,7 +1618,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_write(dev_priv, TV_DAC, tv_dac); intel_de_posting_read(dev_priv, TV_DAC); - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); type = -1; tv_dac = intel_de_read(dev_priv, TV_DAC); @@ -1651,7 +1651,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, intel_de_posting_read(dev_priv, TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. 
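A note on the TC-port rework above: intel_tc_port_disconnect_phy() is replaced by a deferred teardown, where intel_tc_port_unlock() queues a one-second delayed disconnect whenever no link reference is held, and intel_tc_port_put_link() flushes that work synchronously because of the ADL-P PLL-ordering hang called out in the TODO. Below is a minimal sketch of the same pattern under hypothetical names (idle_port and its members are illustrative, not the i915 implementation):

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct idle_port {
	struct mutex lock;
	int refcount;			/* active users of the PHY */
	bool connected;
	struct delayed_work disconnect_work;
};

static void idle_port_disconnect_work(struct work_struct *work)
{
	struct idle_port *p = container_of(work, struct idle_port,
					   disconnect_work.work);

	mutex_lock(&p->lock);
	if (!p->refcount && p->connected)
		p->connected = false;	/* release PHY ownership here */
	mutex_unlock(&p->lock);
}

static void idle_port_init(struct idle_port *p)
{
	mutex_init(&p->lock);
	INIT_DELAYED_WORK(&p->disconnect_work, idle_port_disconnect_work);
}

static void idle_port_unlock(struct idle_port *p)
{
	/* Defer teardown so back-to-back users don't thrash the PHY. */
	if (!p->refcount && p->connected)
		queue_delayed_work(system_unbound_wq, &p->disconnect_work,
				   msecs_to_jiffies(1000));
	mutex_unlock(&p->lock);
}

static void idle_port_put(struct idle_port *p)
{
	mutex_lock(&p->lock);
	--p->refcount;
	idle_port_unlock(p);
	/* Force the disconnect before the caller tears down its PLL. */
	flush_delayed_work(&p->disconnect_work);
}

The final flush is what preserves ordering on the last put: the PHY is guaranteed to be released before the caller proceeds, which is exactly the requirement the patch's TODO describes.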
*/ - intel_wait_for_vblank(dev_priv, crtc->pipe); + intel_crtc_wait_for_next_vblank(crtc); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 330077c2e588..f043d85ba64d 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -330,7 +330,12 @@ enum vbt_gmbus_ddi { ADLS_DDC_BUS_PORT_TC1 = 0x2, ADLS_DDC_BUS_PORT_TC2, ADLS_DDC_BUS_PORT_TC3, - ADLS_DDC_BUS_PORT_TC4 + ADLS_DDC_BUS_PORT_TC4, + ADLP_DDC_BUS_PORT_TC1 = 0x3, + ADLP_DDC_BUS_PORT_TC2, + ADLP_DDC_BUS_PORT_TC3, + ADLP_DDC_BUS_PORT_TC4 + }; #define DP_AUX_A 0x40 @@ -814,6 +819,11 @@ struct lfp_brightness_level { u16 reserved; } __packed; +#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \ + offsetof(struct bdb_lfp_backlight_data, brightness_level) +#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \ + offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits) + struct bdb_lfp_backlight_data { u8 entry_size; struct lfp_backlight_data_entry data[16]; diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index df3286aa6999..9b05f93ed8bc 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -6,12 +6,14 @@ * Manasi Navare <manasi.d.navare@intel.com> */ #include <linux/limits.h> + #include "i915_drv.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" -#include "intel_vdsc.h" #include "intel_qp_tables.h" +#include "intel_vdsc.h" enum ROW_INDEX_BPP { ROW_INDEX_6BPP = 0, @@ -357,11 +359,9 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) return false; } -static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state) +static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { - const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - const struct drm_i915_private *i915 = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (DISPLAY_VER(i915) >= 12) return true; @@ -444,10 +444,10 @@ calculate_rc_params(struct rc_parameters *rc, } } -int intel_dsc_compute_params(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) +int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; u16 compressed_bpp = pipe_config->dsc.compressed_bpp; const struct rc_parameters *rc_params; @@ -547,9 +547,8 @@ int intel_dsc_compute_params(struct intel_encoder *encoder, } enum intel_display_power_domain -intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) +intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -566,7 +565,7 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) */ if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A) return POWER_DOMAIN_TRANSCODER_VDSC_PW2; - else if (is_pipe_dsc(crtc_state)) + else if (is_pipe_dsc(crtc, cpu_transcoder)) return 
POWER_DOMAIN_PIPE(pipe); else return POWER_DOMAIN_TRANSCODER_VDSC_PW2; @@ -577,6 +576,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; u32 pps_val = 0; u32 rc_buf_thresh_dword[4]; @@ -600,8 +600,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; - drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0, pps_val); /* @@ -624,8 +624,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) /* Populate PICTURE_PARAMETER_SET_1 registers */ pps_val = 0; pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); - drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1, pps_val); /* @@ -649,8 +649,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); - drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2, pps_val); /* @@ -674,8 +674,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | DSC_SLICE_WIDTH(vdsc_cfg->slice_width); - drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3, pps_val); /* @@ -699,8 +699,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); - drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4, pps_val); /* @@ -724,8 +724,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); - drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5, pps_val); /* @@ -751,8 +751,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | 
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); - drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6, pps_val); /* @@ -776,8 +776,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); - drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7, pps_val); /* @@ -801,8 +801,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); - drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8, pps_val); /* @@ -826,8 +826,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); - drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9, pps_val); /* @@ -853,8 +853,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); - drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10, pps_val); /* @@ -881,8 +881,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) vdsc_cfg->slice_width) | DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / vdsc_cfg->slice_height); - drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); - if (!is_pipe_dsc(crtc_state)) { + drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16, pps_val); /* @@ -908,10 +908,10 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) rc_buf_thresh_dword[i / 4] |= (u32)(vdsc_cfg->rc_buf_thresh[i] << BITS_PER_BYTE * (i % 4)); - drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i, - rc_buf_thresh_dword[i / 4]); + drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i, + rc_buf_thresh_dword[i / 4]); } - if (!is_pipe_dsc(crtc_state)) { + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW, @@ -965,10 +965,10 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) RC_MAX_QP_SHIFT) | (vdsc_cfg->rc_range_params[i].range_min_qp << RC_MIN_QP_SHIFT)) << 16 * (i % 2)); - drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d 
= 0x%08x\n", i, - rc_range_params_dword[i / 2]); + drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i, + rc_range_params_dword[i / 2]); } - if (!is_pipe_dsc(crtc_state)) { + if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0, rc_range_params_dword[0]); intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW, @@ -1057,8 +1057,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) } } -static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); @@ -1066,6 +1066,9 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, struct drm_dsc_picture_parameter_set pps; enum port port; + if (!crtc_state->dsc.compression_enable) + return; + drm_dsc_pps_payload_pack(&pps, vdsc_cfg); for_each_dsi_port(port, intel_dsi->ports) { @@ -1076,14 +1079,16 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, } } -static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_dp_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; + if (!crtc_state->dsc.compression_enable) + return; + /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); @@ -1095,39 +1100,28 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, sizeof(dp_dsc_pps_sdp)); } -static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state) +static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { - enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - - return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1; + return is_pipe_dsc(crtc, cpu_transcoder) ? + ICL_PIPE_DSS_CTL1(crtc->pipe) : DSS_CTL1; } -static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state) +static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder) { - enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; - - return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2; -} - -static struct intel_crtc * -_get_crtc_for_pipe(struct drm_i915_private *i915, enum pipe pipe) -{ - if (!intel_pipe_valid(i915, pipe)) - return NULL; - - return intel_get_crtc_for_pipe(i915, pipe); + return is_pipe_dsc(crtc, cpu_transcoder) ? 
+ ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2; } struct intel_crtc * intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc) { - return _get_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1); + return intel_crtc_for_pipe(to_i915(primary_crtc->base.dev), primary_crtc->pipe + 1); } static struct intel_crtc * intel_dsc_get_bigjoiner_primary(const struct intel_crtc *secondary_crtc) { - return _get_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1); + return intel_crtc_for_pipe(to_i915(secondary_crtc->base.dev), secondary_crtc->pipe - 1); } void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) @@ -1142,12 +1136,11 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) else dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER; - intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val); + intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val); } } -void intel_dsc_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1159,13 +1152,6 @@ void intel_dsc_enable(struct intel_encoder *encoder, intel_dsc_pps_configure(crtc_state); - if (!crtc_state->bigjoiner_slave) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - intel_dsc_dsi_pps_write(encoder, crtc_state); - else - intel_dsc_dp_pps_write(encoder, crtc_state); - } - dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; if (crtc_state->dsc.dsc_split) { dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; @@ -1176,8 +1162,8 @@ void intel_dsc_enable(struct intel_encoder *encoder, if (!crtc_state->bigjoiner_slave) dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE; } - intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val); - intel_de_write(dev_priv, dss_ctl2_reg(crtc_state), dss_ctl2_val); + intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val); + intel_de_write(dev_priv, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val); } void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) @@ -1188,8 +1174,8 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) /* Disable only if either of them is enabled */ if (old_crtc_state->dsc.compression_enable || old_crtc_state->bigjoiner) { - intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0); - intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0); + intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0); + intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0); } } @@ -1199,7 +1185,7 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dss_ctl1; - dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state)); + dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder)); if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) { crtc_state->bigjoiner = true; crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc); @@ -1214,9 +1200,10 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state) void intel_dsc_get_config(struct intel_crtc_state *crtc_state) { - struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct 
drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; enum intel_display_power_domain power_domain; intel_wakeref_t wakeref; @@ -1225,14 +1212,14 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state) if (!intel_dsc_source_support(crtc_state)) return; - power_domain = intel_dsc_power_domain(crtc_state); + power_domain = intel_dsc_power_domain(crtc, cpu_transcoder); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) return; - dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state)); - dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc_state)); + dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder)); + dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder)); crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE; if (!crtc_state->dsc.compression_enable) @@ -1256,7 +1243,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state) /* FIXME: add more state readout as needed */ /* PPS1 */ - if (!is_pipe_dsc(crtc_state)) + if (!is_pipe_dsc(crtc, cpu_transcoder)) val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1); else val = intel_de_read(dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index dfb1fd38deb4..4ec75f715986 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -8,20 +8,24 @@ #include <linux/types.h> -struct intel_encoder; +enum transcoder; +struct intel_crtc; struct intel_crtc_state; +struct intel_encoder; bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state); void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state); -void intel_dsc_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void intel_dsc_enable(const struct intel_crtc_state *crtc_state); void intel_dsc_disable(const struct intel_crtc_state *crtc_state); -int intel_dsc_compute_params(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config); +int intel_dsc_compute_params(struct intel_crtc_state *pipe_config); void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state); void intel_dsc_get_config(struct intel_crtc_state *crtc_state); enum intel_display_power_domain -intel_dsc_power_domain(const struct intel_crtc_state *crtc_state); +intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder); struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc); +void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_dsc_dp_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VDSC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index c335b1dbafcf..139e8936edc5 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -60,7 +60,7 @@ intel_vrr_check_modeset(struct intel_atomic_state *state) * Between those two points the vblank exit starts (and hence registers get * latched) ASAP after a push is sent. * - * framestart_delay is programmable 0-3. + * framestart_delay is programmable 1-4. 
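The PPS hunks above pack the DSC rate-control tables into 32-bit registers, RC_BUF_THRESH four bytes to a dword and RC_RANGE_PARAMS two 16-bit entries to a dword. Here is a standalone, runnable sketch of the byte-packing arithmetic with arbitrary demo values (the real thresholds come from struct drm_dsc_config):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	uint8_t thresh[14];	/* drm_dsc_config carries 14 rc_buf_thresh bytes */
	uint32_t dw[4] = { 0 };
	int i;

	for (i = 0; i < 14; i++)
		thresh[i] = 2 * (i + 1);	/* demo values only */

	/* Byte i lands in dword i/4, byte lane i%4, as in the loop above. */
	for (i = 0; i < 14; i++)
		dw[i / 4] |= (uint32_t)thresh[i] << (BITS_PER_BYTE * (i % 4));

	for (i = 0; i < 4; i++)
		printf("RC_BUF_THRESH_%d = 0x%08x\n", i, (unsigned int)dw[i]);
	return 0;
}

The RC_RANGE_PARAMS loop is the same idea with 16-bit lanes: entry i lands in dword i/2, shifted left by 16 * (i % 2).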
*/ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) { @@ -138,13 +138,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, i915->window2_delay; else /* - * FIXME: s/4/framestart_delay+1/ to get consistent + * FIXME: s/4/framestart_delay/ to get consistent * earliest/latest points for register latching regardless * of the framestart_delay used? * * FIXME: this really needs the extra scanline to provide consistent * behaviour for all framestart_delay values. Otherwise with - * framestart_delay==3 we will end up extending the min vblank by + * framestart_delay==4 we will end up extending the min vblank by * one extra line. */ crtc_state->vrr.pipeline_full = @@ -193,6 +193,18 @@ void intel_vrr_send_push(const struct intel_crtc_state *crtc_state) TRANS_PUSH_EN | TRANS_PUSH_SEND); } +bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (!crtc_state->vrr.enable) + return false; + + return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND; +} + void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 96f9c9c27ab9..1c2da572693d 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -23,6 +23,7 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, void intel_vrr_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_vrr_send_push(const struct intel_crtc_state *crtc_state); +bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state); void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state); void intel_vrr_get_config(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index 37eabeff8197..c2e94118566b 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -4,6 +4,7 @@ */ #include "intel_de.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "skl_scaler.h" #include "skl_universal_plane.h" diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 724e7b04f3b6..d5359cf3d270 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -13,11 +13,13 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" +#include "intel_fbc.h" #include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "pxp/intel_pxp.h" static const u32 skl_plane_formats[] = { DRM_FORMAT_C8, @@ -162,50 +164,6 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_XVYU16161616, }; -static const u64 skl_plane_format_modifiers_noccs[] = { - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 skl_plane_format_modifiers_ccs[] = { - I915_FORMAT_MOD_Yf_TILED_CCS, - I915_FORMAT_MOD_Y_TILED_CCS, - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - 
I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_mc_ccs[] = { - I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_rc_ccs[] = { - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 adlp_step_a_plane_format_modifiers[] = { - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) { switch (format) { @@ -463,9 +421,19 @@ static int icl_plane_min_width(const struct drm_framebuffer *fb, } } -static int icl_plane_max_width(const struct drm_framebuffer *fb, - int color_plane, - unsigned int rotation) +static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) +{ + if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) + return 4096; + else + return 5120; +} + +static int icl_sdr_plane_max_width(const struct drm_framebuffer *fb, + int color_plane, + unsigned int rotation) { return 5120; } @@ -632,7 +600,7 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state, { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; - u32 stride = plane_state->view.color_plane[color_plane].stride; + u32 stride = plane_state->view.color_plane[color_plane].scanout_stride; if (color_plane >= fb->format->num_planes) return 0; @@ -641,8 +609,8 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state, } static void -skl_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +skl_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; @@ -656,6 +624,7 @@ skl_disable_plane(struct intel_plane *plane, skl_write_plane_wm(plane, crtc_state); + intel_psr2_disable_plane_sel_fetch(plane, crtc_state); intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); @@ -714,13 +683,13 @@ static u32 skl_plane_ctl_format(u32 pixel_format) case DRM_FORMAT_XYUV8888: return PLANE_CTL_FORMAT_XYUV; case DRM_FORMAT_YUYV: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YUYV; case DRM_FORMAT_YVYU: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YVYU; case DRM_FORMAT_UYVY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_UYVY; case DRM_FORMAT_VYUY: - return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; + return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_VYUY; case DRM_FORMAT_NV12: return PLANE_CTL_FORMAT_NV12; case DRM_FORMAT_P010: @@ -983,6 +952,9 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } + if (plane_state->force_black) + plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; + return plane_color_ctl; } @@ 
-993,6 +965,11 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, u32 offset = plane_state->view.color_plane[color_plane].offset; if (intel_fb_uses_dpt(fb)) { + /* + * The DPT object contains only one vma, so the VMA's offset + * within the DPT is always 0. + */ + WARN_ON(plane_state->dpt_vma->node.start); WARN_ON(offset & 0x1fffff); return offset >> 9; } else { @@ -1001,47 +978,60 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, } } +static u32 skl_plane_surf(const struct intel_plane_state *plane_state, + int color_plane) +{ + u32 plane_surf; + + plane_surf = intel_plane_ggtt_offset(plane_state) + + skl_surf_address(plane_state, color_plane); + + if (plane_state->decrypt) + plane_surf |= PLANE_SURF_DECRYPT; + + return plane_surf; +} + +static void icl_plane_csc_load_black(struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); + + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); + + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); +} + static void -skl_program_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +skl_program_plane_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 surf_addr = skl_surf_address(plane_state, color_plane); u32 stride = skl_plane_stride(plane_state, color_plane); const struct drm_framebuffer *fb = plane_state->hw.fb; - int aux_plane = skl_main_to_aux_plane(fb, color_plane); int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; - u32 x = plane_state->view.color_plane[color_plane].x; - u32 y = plane_state->view.color_plane[color_plane].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u8 alpha = plane_state->hw.alpha >> 8; - u32 plane_color_ctl = 0, aux_dist = 0; unsigned long irqflags; - u32 keymsk, keymax; - u32 plane_ctl = plane_state->ctl; - - plane_ctl |= skl_plane_ctl_crtc(crtc_state); - - if (DISPLAY_VER(dev_priv) >= 10) - plane_color_ctl = plane_state->color_ctl | - glk_plane_color_ctl_crtc(crtc_state); - - /* Sizes are 0 based */ - src_w--; - src_h--; - - keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - - keymsk = key->channel_mask & 0x7ffffff; - if (alpha < 0xff) - keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; /* The scaler will handle the output position */ if (plane_state->scaler_id >= 0) { @@ -1049,40 +1039,83 @@ 
skl_program_plane(struct intel_plane *plane, crtc_y = 0; } - if (aux_plane) { - aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr; - - if (DISPLAY_VER(dev_priv) < 12) - aux_dist |= skl_plane_stride(plane_state, aux_plane); - } - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + /* + * FIXME: pxp session invalidation can hit any time even at time of commit + * or after the commit, display content will be garbage. + */ + if (plane_state->force_black) + icl_plane_csc_load_black(plane); + intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - (src_h << 16) | src_w); + ((src_h - 1) << 16) | (src_w - 1)); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); + if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { + intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), + lower_32_bits(plane_state->ccval)); + intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1), + upper_32_bits(plane_state->ccval)); + } if (icl_is_hdr_plane(dev_priv, plane_id)) intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl); - if (DISPLAY_VER(dev_priv) >= 10) - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), - plane_color_ctl); - if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) - intel_uncore_write64_fw(&dev_priv->uncore, - PLANE_CC_VAL(pipe, plane_id), plane_state->ccval); - skl_write_plane_wm(plane, crtc_state); + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +skl_program_plane_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = skl_main_to_aux_plane(fb, color_plane); + u32 x = plane_state->view.color_plane[color_plane].x; + u32 y = plane_state->view.color_plane[color_plane].y; + u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0; + u8 alpha = plane_state->hw.alpha >> 8; + u32 plane_ctl = plane_state->ctl; + unsigned long irqflags; + + plane_ctl |= skl_plane_ctl_crtc(crtc_state); + + if (DISPLAY_VER(dev_priv) >= 10) + plane_color_ctl = plane_state->color_ctl | + glk_plane_color_ctl_crtc(crtc_state); + + keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); + + keymsk = key->channel_mask & 0x7ffffff; + if (alpha < 0xff) + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; + + if (aux_plane) { + aux_dist = skl_surf_address(plane_state, aux_plane) - + skl_surf_address(plane_state, color_plane); + + if (DISPLAY_VER(dev_priv) < 12) + aux_dist |= skl_plane_stride(plane_state, aux_plane); + } + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), key->min_value); intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); @@ -1091,18 +1124,22 @@ skl_program_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), (y << 16) | x); + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, 
plane_id), aux_dist); + if (DISPLAY_VER(dev_priv) < 11) intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), (plane_state->view.color_plane[1].y << 16) | plane_state->view.color_plane[1].x); - if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi)) - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + if (DISPLAY_VER(dev_priv) >= 10) + intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); /* * Enable the scaler before the plane so that we don't * get a catastrophic underrun even if the two operations * end up happening in two different frames. + * + * TODO: split into noarm+arm pair */ if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); @@ -1114,7 +1151,7 @@ skl_program_plane(struct intel_plane *plane, */ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); + skl_plane_surf(plane_state, color_plane)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1129,7 +1166,6 @@ skl_plane_async_flip(struct intel_plane *plane, unsigned long irqflags; enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - u32 surf_addr = plane_state->view.color_plane[0].offset; u32 plane_ctl = plane_state->ctl; plane_ctl |= skl_plane_ctl_crtc(crtc_state); @@ -1141,15 +1177,29 @@ skl_plane_async_flip(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); + skl_plane_surf(plane_state, 0)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -skl_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +skl_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int color_plane = 0; + + if (plane_state->planar_linked_plane && !plane_state->planar_slave) + /* Program the UV plane on planar master */ + color_plane = 1; + + skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane); +} + +static void +skl_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { int color_plane = 0; @@ -1157,7 +1207,7 @@ skl_update_plane(struct intel_plane *plane, /* Program the UV plane on planar master */ color_plane = 1; - skl_program_plane(plane, crtc_state, plane_state, color_plane); + skl_program_plane_arm(plane, crtc_state, plane_state, color_plane); } static bool intel_format_is_p01x(u32 format) @@ -1184,7 +1234,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, return 0; if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && - is_ccs_modifier(fb->modifier)) { + intel_fb_is_ccs_modifier(fb->modifier)) { drm_dbg_kms(&dev_priv->drm, "RC support only with 0/180 degree rotation (%x)\n", rotation); @@ -1236,13 +1286,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, /* Y-tiling is not supported in IF-ID Interlace mode */ if (crtc_state->hw.enable && crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && - (fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED || - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || - fb->modifier == 
I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) { + fb->modifier != DRM_FORMAT_MOD_LINEAR && + fb->modifier != I915_FORMAT_MOD_X_TILED) { drm_dbg_kms(&dev_priv->drm, "Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; @@ -1439,7 +1484,7 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, if (fb->modifier == I915_FORMAT_MOD_X_TILED) { int cpp = fb->format->cpp[0]; - while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) { + while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (*offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); @@ -1488,7 +1533,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * CCS AUX surface doesn't have its own x/y offsets, we must make sure * they match with the main surface x/y offsets. */ - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset, aux_plane)) { if (offset == 0) @@ -1552,7 +1597,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, uv_plane); - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { int ccs_plane = main_to_ccs_plane(fb, uv_plane); u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset; u32 alignment = intel_surf_alignment(fb, uv_plane); @@ -1608,8 +1653,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) int hsub, vsub; int x, y; - if (!is_ccs_plane(fb, ccs_plane) || - is_gen12_ccs_cc_plane(fb, ccs_plane)) + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) continue; intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, @@ -1651,7 +1695,7 @@ static int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since the main surface setup depends on * it. 
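The plane-check changes above swap open-coded modifier lists (is_ccs_modifier(), per-modifier switch statements) for intel_fb_is_ccs_modifier()-style predicates backed by a central table. A userspace-flavoured sketch of table-driven classification follows; the I915_FORMAT_MOD_* values are the real DRM uAPI modifiers, while the MOD_F_* flags and function names are invented for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <drm/drm_fourcc.h>

#define MOD_F_CCS	(1u << 0)	/* any color-compressed layout */
#define MOD_F_MC	(1u << 1)	/* media-compressed layout */

struct mod_info {
	uint64_t modifier;
	unsigned int flags;
};

static const struct mod_info mod_table[] = {
	{ I915_FORMAT_MOD_Y_TILED_CCS,             MOD_F_CCS },
	{ I915_FORMAT_MOD_Yf_TILED_CCS,            MOD_F_CCS },
	{ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,    MOD_F_CCS },
	{ I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, MOD_F_CCS },
	{ I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,    MOD_F_CCS | MOD_F_MC },
};

static unsigned int mod_flags(uint64_t modifier)
{
	for (size_t i = 0; i < sizeof(mod_table) / sizeof(mod_table[0]); i++)
		if (mod_table[i].modifier == modifier)
			return mod_table[i].flags;
	return 0;	/* linear and plain X/Y/Yf tiling: no compression */
}

static bool is_ccs(uint64_t modifier) { return mod_flags(modifier) & MOD_F_CCS; }
static bool is_mc_ccs(uint64_t modifier) { return mod_flags(modifier) & MOD_F_MC; }

Centralizing the metadata is what lets call sites shrink to a single predicate, as in the skl_plane_check_fb() and skl_check_plane_surface() hunks above.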
*/ - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { ret = skl_check_ccs_aux_surface(plane_state); if (ret) return ret; @@ -1689,6 +1733,18 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb) } } +static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; +} + +static bool pxp_is_borked(struct drm_i915_gem_object *obj) +{ + return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); +} + static int skl_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { @@ -1733,6 +1789,11 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; + if (DISPLAY_VER(dev_priv) >= 11) { + plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); + plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); + } + /* HW only has 8 bits pixel precision, disable plane if invisible */ if (!(plane_state->hw.alpha >> 8)) plane_state->uapi.visible = false; @@ -1764,6 +1825,15 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, return pipe == PIPE_A && plane_id == PLANE_PRIMARY; } +static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id) +{ + if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) + return dev_priv->fbc; + else + return NULL; +} + static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -1822,49 +1892,20 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, } } -static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - if (plane_id == PLANE_CURSOR) - return false; - - if (DISPLAY_VER(dev_priv) >= 11) - return true; - - if (IS_GEMINILAKE(dev_priv)) - return pipe != PIPE_C; - - return pipe != PIPE_C && - (plane_id == PLANE_PRIMARY || - plane_id == PLANE_SPRITE0); -} - static bool skl_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { struct intel_plane *plane = to_intel_plane(_plane); - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - if (!plane->has_ccs) - return false; - break; - default: + if (!intel_fb_plane_supports_modifier(plane, modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) + if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: @@ -1905,52 +1946,20 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0)) - return false; - - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return false; - - return plane_id < PLANE_SPRITE4; -} - static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - struct drm_i915_private *dev_priv = to_i915(_plane->dev); struct intel_plane *plane = to_intel_plane(_plane); - switch 
(modifier) { - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) - return false; - fallthrough; - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return false; - break; - default: + if (!intel_fb_plane_supports_modifier(plane, modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) + if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_YUYV: @@ -1962,7 +1971,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: - if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) + if (intel_fb_is_mc_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: @@ -1991,18 +2000,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, } } -static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return adlp_step_a_plane_format_modifiers; - else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) - return gen12_plane_format_modifiers_mc_ccs; - else - return gen12_plane_format_modifiers_rc_ccs; -} - static const struct drm_plane_funcs skl_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -2043,6 +2040,64 @@ skl_plane_disable_flip_done(struct intel_plane *plane) spin_unlock_irq(&i915->irq_lock); } +static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, + enum pipe pipe, enum plane_id plane_id) +{ + /* Wa_22011186057 */ + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) + return false; + + if (DISPLAY_VER(i915) >= 11) + return true; + + if (IS_GEMINILAKE(i915)) + return pipe != PIPE_C; + + return pipe != PIPE_C && + (plane_id == PLANE_PRIMARY || + plane_id == PLANE_SPRITE0); +} + +static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + if (DISPLAY_VER(i915) < 12) + return false; + + /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ + if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || + IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0)) + return false; + + /* Wa_22011186057 */ + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) + return false; + + return plane_id < PLANE_SPRITE4; +} + +static u8 skl_get_plane_caps(struct drm_i915_private *i915, + enum pipe pipe, enum plane_id plane_id) +{ + u8 caps = INTEL_PLANE_CAP_TILING_X; + + if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915)) + caps |= INTEL_PLANE_CAP_TILING_Y; + if (DISPLAY_VER(i915) < 12) + caps |= INTEL_PLANE_CAP_TILING_Yf; + + if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) { + caps |= INTEL_PLANE_CAP_CCS_RC; + if (DISPLAY_VER(i915) >= 12) + caps |= INTEL_PLANE_CAP_CCS_RC_CC; + } + + if (gen12_plane_has_mc_ccs(i915, plane_id)) + caps |= INTEL_PLANE_CAP_CCS_MC; + + return caps; +} + struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) @@ -2065,16 +2120,14 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->id = plane_id; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); - 
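skl_get_plane_caps() above collapses per-plane modifier support into a capability bitmask, which intel_fb_plane_get_modifiers() then expands into an allocated, DRM_FORMAT_MOD_INVALID-terminated list (freed right after drm_universal_plane_init(), as the next hunk shows). A sketch of that expansion, with CAP_* stand-ins for the driver's INTEL_PLANE_CAP_* bits:

#include <stdint.h>
#include <stdlib.h>
#include <drm/drm_fourcc.h>

#define CAP_TILING_X	(1u << 0)
#define CAP_TILING_Y	(1u << 1)
#define CAP_CCS_RC	(1u << 2)

/* Caller must free the returned list, mirroring the kfree() above. */
static uint64_t *modifiers_from_caps(unsigned int caps)
{
	uint64_t *mods = malloc(6 * sizeof(*mods));	/* worst case + sentinel */
	int n = 0;

	if (!mods)
		return NULL;

	/* Keep the list ordered from most to least preferred. */
	if (caps & CAP_CCS_RC)
		mods[n++] = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS;
	if (caps & CAP_TILING_Y)
		mods[n++] = I915_FORMAT_MOD_Y_TILED;
	if (caps & CAP_TILING_X)
		mods[n++] = I915_FORMAT_MOD_X_TILED;
	mods[n++] = DRM_FORMAT_MOD_LINEAR;	/* always supported */
	mods[n++] = DRM_FORMAT_MOD_INVALID;	/* sentinel DRM expects */
	return mods;
}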
plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } + intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane); if (DISPLAY_VER(dev_priv) >= 11) { plane->min_width = icl_plane_min_width; - plane->max_width = icl_plane_max_width; + if (icl_is_hdr_plane(dev_priv, plane_id)) + plane->max_width = icl_hdr_plane_max_width; + else + plane->max_width = icl_sdr_plane_max_width; plane->max_height = icl_plane_max_height; plane->min_cdclk = icl_plane_min_cdclk; } else if (DISPLAY_VER(dev_priv) >= 10) { @@ -2088,8 +2141,9 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, } plane->max_stride = skl_plane_max_stride; - plane->update_plane = skl_update_plane; - plane->disable_plane = skl_disable_plane; + plane->update_noarm = skl_plane_update_noarm; + plane->update_arm = skl_plane_update_arm; + plane->disable_arm = skl_plane_disable_arm; plane->get_hw_state = skl_plane_get_hw_state; plane->check_plane = skl_plane_check; @@ -2111,29 +2165,28 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, formats = skl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); - if (DISPLAY_VER(dev_priv) >= 12) { - modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); + if (DISPLAY_VER(dev_priv) >= 12) plane_funcs = &gen12_plane_funcs; - } else { - if (plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; + else plane_funcs = &skl_plane_funcs; - } if (plane_id == PLANE_PRIMARY) plane_type = DRM_PLANE_TYPE_PRIMARY; else plane_type = DRM_PLANE_TYPE_OVERLAY; + modifiers = intel_fb_plane_get_modifiers(dev_priv, + skl_get_plane_caps(dev_priv, pipe, plane_id)); + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, plane_type, "plane %d%c", plane_id + 1, pipe_name(pipe)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 0ee4ff341e25..20141f33ed64 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -32,15 +32,19 @@ #include "i915_drv.h" #include "intel_atomic.h" +#include "intel_backlight.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" +#include "intel_dsi_vbt.h" #include "intel_fifo_underrun.h" #include "intel_panel.h" -#include "intel_sideband.h" #include "skl_scaler.h" +#include "vlv_dsi.h" +#include "vlv_dsi_pll.h" +#include "vlv_sideband.h" /* return pixels in terms of txbyteclkhs */ static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, @@ -270,23 +274,19 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; - const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - if (fixed_mode) { - intel_fixed_panel_mode(fixed_mode, adjusted_mode); + ret = intel_panel_compute_config(intel_connector, adjusted_mode); + if (ret) + return ret; - if (HAS_GMCH(dev_priv)) - ret = 
intel_gmch_panel_fitting(pipe_config, conn_state); - else - ret = intel_pch_panel_fitting(pipe_config, conn_state); - if (ret) - return ret; - } + ret = intel_panel_fitting(pipe_config, conn_state); + if (ret) + return ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -883,7 +883,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, intel_dsi_port_enable(encoder, pipe_config); } - intel_panel_enable_backlight(pipe_config, conn_state); + intel_backlight_enable(pipe_config, conn_state); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); } @@ -913,7 +913,7 @@ static void intel_dsi_disable(struct intel_atomic_state *state, drm_dbg_kms(&i915->drm, "\n"); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); - intel_panel_disable_backlight(old_conn_state); + intel_backlight_disable(old_conn_state); /* * According to the spec we should send SHUTDOWN before @@ -1261,7 +1261,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 pclk; + drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); @@ -1273,6 +1275,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, pclk = vlv_dsi_get_pclk(encoder, pipe_config); } + if (intel_dsi->dual_link) + pclk *= 2; + if (pclk) { pipe_config->hw.adjusted_mode.crtc_clock = pclk; pipe_config->port_clock = pclk; @@ -1633,25 +1638,21 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { static void vlv_dsi_add_properties(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + u32 allowed_scalers; - if (connector->panel.fixed_mode) { - u32 allowed_scalers; + allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); + if (!HAS_GMCH(dev_priv)) + allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); - allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); - if (!HAS_GMCH(dev_priv)) - allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); + drm_connector_attach_scaling_mode_property(&connector->base, + allowed_scalers); - drm_connector_attach_scaling_mode_property(&connector->base, - allowed_scalers); + connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; - connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; - - drm_connector_set_panel_orientation_with_quirk( - &connector->base, - intel_dsi_get_panel_orientation(connector), - connector->panel.fixed_mode->hdisplay, - connector->panel.fixed_mode->vdisplay); - } + drm_connector_set_panel_orientation_with_quirk(&connector->base, + intel_dsi_get_panel_orientation(connector), + connector->panel.fixed_mode->hdisplay, + connector->panel.fixed_mode->vdisplay); } #define NS_KHZ_RATIO 1000000 @@ -1876,7 +1877,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_encoder->post_disable = intel_dsi_post_disable; intel_encoder->get_hw_state = intel_dsi_get_hw_state; intel_encoder->get_config = intel_dsi_get_config; - intel_encoder->update_pipe = intel_panel_update_backlight; + intel_encoder->update_pipe = intel_backlight_update; intel_encoder->shutdown = intel_dsi_shutdown; intel_connector->get_hw_state = intel_connector_get_hw_state; @@ -1964,7 +1965,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) } intel_panel_init(&intel_connector->panel, fixed_mode, NULL); - intel_panel_setup_backlight(connector, INVALID_PIPE); + 
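One detail worth calling out in the vlv_dsi.c hunks above: intel_dsi_get_config() now doubles the recovered pixel clock when the panel is dual-link, since each DSI link carries half of the pixels and the PLL readout therefore yields half the panel's pixel rate. For example, a 148500 kHz mode split across two links runs each link at about 74250 kHz. A trivial sketch of the readout fix-up:

#include <stdbool.h>

/* per_link_pclk_khz is what the PLL readout recovers for one link. */
static int dsi_crtc_clock_khz(int per_link_pclk_khz, bool dual_link)
{
	return dual_link ? per_link_pclk_khz * 2 : per_link_pclk_khz;
}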
intel_backlight_setup(intel_connector, INVALID_PIPE); vlv_dsi_add_properties(intel_connector); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h new file mode 100644 index 000000000000..0c2b279df9d4 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dsi.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __VLV_DSI_H__ +#define __VLV_DSI_H__ + +#include <linux/types.h> + +enum port; +struct drm_i915_private; +struct intel_dsi; + +void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); +enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); +void vlv_dsi_init(struct drm_i915_private *dev_priv); + +#endif /* __VLV_DSI_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 90185b219447..1b81797dd02e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -31,7 +31,8 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" -#include "intel_sideband.h" +#include "vlv_dsi_pll.h" +#include "vlv_sideband.h" static const u16 lfsr_converts[] = { 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */ @@ -568,3 +569,26 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) } intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); } + +static void assert_dsi_pll(struct drm_i915_private *i915, bool state) +{ + bool cur_state; + + vlv_cck_get(i915); + cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN; + vlv_cck_put(i915); + + I915_STATE_WARN(cur_state != state, + "DSI PLL state assertion failure (expected %s, current %s)\n", + onoff(state), onoff(cur_state)); +} + +void assert_dsi_pll_enabled(struct drm_i915_private *i915) +{ + assert_dsi_pll(i915, true); +} + +void assert_dsi_pll_disabled(struct drm_i915_private *i915) +{ + assert_dsi_pll(i915, false); +} diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h new file mode 100644 index 000000000000..ab9291ad1e79 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __VLV_DSI_PLL_H__ +#define __VLV_DSI_PLL_H__ + +#include <linux/types.h> + +enum port; +struct drm_i915_private; +struct intel_crtc_state; +struct intel_encoder; + +int vlv_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void vlv_dsi_pll_disable(struct intel_encoder *encoder); +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); +int bxt_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void bxt_dsi_pll_disable(struct intel_encoder *encoder); +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +void assert_dsi_pll_enabled(struct drm_i915_private *i915); +void assert_dsi_pll_disabled(struct drm_i915_private *i915); + +#endif /* __VLV_DSI_PLL_H__ */ diff 
--git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c deleted file mode 100644 index 7df91b7e4ca8..000000000000 --- a/drivers/gpu/drm/i915/dma_resv_utils.c +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2020 Intel Corporation - */ - -#include <linux/dma-resv.h> - -#include "dma_resv_utils.h" - -void dma_resv_prune(struct dma_resv *resv) -{ - if (dma_resv_trylock(resv)) { - if (dma_resv_test_signaled(resv, true)) - dma_resv_add_excl_fence(resv, NULL); - dma_resv_unlock(resv); - } -} diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h deleted file mode 100644 index b9d8fb5f8367..000000000000 --- a/drivers/gpu/drm/i915/dma_resv_utils.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2020 Intel Corporation - */ - -#ifndef DMA_RESV_UTILS_H -#define DMA_RESV_UTILS_H - -struct dma_resv; - -void dma_resv_prune(struct dma_resv *resv); - -#endif /* DMA_RESV_UTILS_H */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 6234e17259c1..470fdfd61a0f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -4,6 +4,8 @@ * Copyright © 2014-2016 Intel Corporation */ +#include <linux/dma-fence-array.h> + #include "gt/intel_engine.h" #include "i915_gem_ioctls.h" @@ -36,7 +38,7 @@ static __always_inline u32 __busy_write_id(u16 id) } static __always_inline unsigned int -__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id)) +__busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; @@ -46,29 +48,60 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id)) * to eventually flush us, but to minimise latency just ask the * hardware. * - * Note we only report on the status of native fences. + * Note we only report on the status of native fences and we currently + * have two native fences: + * + * 1. A composite fence (dma_fence_array) constructed of i915 requests + * created during a parallel submission. In this case we deconstruct the + * composite fence into individual i915 requests and check the status of + * each request. + * + * 2. A single i915 request. */ - if (!dma_fence_is_i915(fence)) + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + struct dma_fence **child = array->fences; + unsigned int nchild = array->num_fences; + + do { + struct dma_fence *current_fence = *child++; + + /* Not an i915 fence, can't be busy per above */ + if (!dma_fence_is_i915(current_fence) || + !test_bit(I915_FENCE_FLAG_COMPOSITE, + ¤t_fence->flags)) { + return 0; + } + + rq = to_request(current_fence); + if (!i915_request_completed(rq)) + return flag(rq->engine->uabi_class); + } while (--nchild); + + /* All requests in array complete, not busy */ return 0; + } else { + if (!dma_fence_is_i915(fence)) + return 0; - /* opencode to_request() in order to avoid const warnings */ - rq = container_of(fence, const struct i915_request, fence); - if (i915_request_completed(rq)) - return 0; + rq = to_request(fence); + if (i915_request_completed(rq)) + return 0; - /* Beware type-expansion follies! */ - BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); - return flag(rq->engine->uabi_class); + /* Beware type-expansion follies! 
*/ + BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); + return flag(rq->engine->uabi_class); + } } static __always_inline unsigned int -busy_check_reader(const struct dma_fence *fence) +busy_check_reader(struct dma_fence *fence) { return __busy_set_if_active(fence, __busy_read_flag); } static __always_inline unsigned int -busy_check_writer(const struct dma_fence *fence) +busy_check_writer(struct dma_fence *fence) { if (!fence) return 0; @@ -82,8 +115,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_busy *args = data; struct drm_i915_gem_object *obj; - struct dma_resv_list *list; - unsigned int seq; + struct dma_resv_iter cursor; + struct dma_fence *fence; int err; err = -ENOENT; @@ -109,27 +142,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * to report the overall busyness. This is what the wait-ioctl does. * */ -retry: - seq = raw_read_seqcount(&obj->base.resv->seq); - - /* Translate the exclusive fence to the READ *and* WRITE engine */ - args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv)); - - /* Translate shared fences to READ set of engines */ - list = dma_resv_shared_list(obj->base.resv); - if (list) { - unsigned int shared_count = list->shared_count, i; - - for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence = - rcu_dereference(list->shared[i]); - + args->busy = 0; + dma_resv_iter_begin(&cursor, obj->base.resv, true); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + if (dma_resv_iter_is_restarted(&cursor)) + args->busy = 0; + + if (dma_resv_iter_is_exclusive(&cursor)) + /* Translate the exclusive fence to the READ *and* WRITE engine */ + args->busy |= busy_check_writer(fence); + else + /* Translate shared fences to READ set of engines */ args->busy |= busy_check_reader(fence); - } } - - if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq)) - goto retry; + dma_resv_iter_end(&cursor); err = 0; out: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index f0435c6feb68..8a248003dfae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -69,10 +69,16 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj) bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct clflush *clflush; assert_object_held(obj); + if (IS_DGFX(i915)) { + WARN_ON_ONCE(obj->cache_dirty); + return false; + } + /* * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. @@ -105,16 +111,24 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, if (clflush) { i915_sw_fence_await_reservation(&clflush->base.chain, obj->base.resv, NULL, true, - i915_fence_timeout(to_i915(obj->base.dev)), + i915_fence_timeout(i915), I915_FENCE_GFP); dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma); dma_fence_work_commit(&clflush->base); + /* + * We must have successfully populated the pages (since we are + * holding a pin on the pages as per the flush worker) to reach + * this point, which must mean we have already done the required + * flush-on-acquire, hence resetting cache_dirty here should be + * safe. 
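The busy-ioctl rework above is a useful illustration of the dma_resv_iter interface that replaces the open-coded seqcount retry loop. A minimal sketch of the unlocked iteration pattern, using the same iterator calls as the hunk above (the wrapper function itself is illustrative only):

/*
 * Sketch: walk every fence of a reservation object without taking its lock.
 * The iterator restarts if the object is modified concurrently, so any state
 * accumulated so far must be thrown away when is_restarted() reports true.
 */
static u32 sample_busy_flags(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	u32 busy = 0;

	dma_resv_iter_begin(&cursor, resv, true); /* true = include shared fences */
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			busy = 0; /* concurrent update, accumulate again */

		if (dma_resv_iter_is_exclusive(&cursor))
			busy |= busy_check_writer(fence);
		else
			busy |= busy_check_reader(fence);
	}
	dma_resv_iter_end(&cursor);

	return busy;
}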
+ */ + obj->cache_dirty = false; } else if (obj->mm.pages) { __do_clflush(obj); + obj->cache_dirty = false; } else { GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); } - obj->cache_dirty = false; return true; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index cff72679ad7c..347dab952e90 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -77,6 +77,8 @@ #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" +#include "pxp/intel_pxp.h" + #include "i915_gem_context.h" #include "i915_trace.h" #include "i915_user_extensions.h" @@ -186,10 +188,13 @@ static int validate_priority(struct drm_i915_private *i915, return 0; } -static void proto_context_close(struct i915_gem_proto_context *pc) +static void proto_context_close(struct drm_i915_private *i915, + struct i915_gem_proto_context *pc) { int i; + if (pc->pxp_wakeref) + intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref); if (pc->vm) i915_vm_put(pc->vm); if (pc->user_engines) { @@ -241,6 +246,35 @@ static int proto_context_set_persistence(struct drm_i915_private *i915, return 0; } +static int proto_context_set_protected(struct drm_i915_private *i915, + struct i915_gem_proto_context *pc, + bool protected) +{ + int ret = 0; + + if (!protected) { + pc->uses_protected_content = false; + } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) { + ret = -ENODEV; + } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || + !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { + ret = -EPERM; + } else { + pc->uses_protected_content = true; + + /* + * protected context usage requires the PXP session to be up, + * which in turn requires the device to be active. + */ + pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + if (!intel_pxp_is_active(&i915->gt.pxp)) + ret = intel_pxp_start(&i915->gt.pxp); + } + + return ret; +} + static struct i915_gem_proto_context * proto_context_create(struct drm_i915_private *i915, unsigned int flags) { @@ -269,7 +303,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags) return pc; proto_close: - proto_context_close(pc); + proto_context_close(i915, pc); return err; } @@ -442,6 +476,13 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) u16 idx, num_bonds; int err, n; + if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && + !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { + drm_dbg(&i915->drm, + "Bonding not supported on this platform\n"); + return -ENODEV; + } + if (get_user(idx, &ext->virtual_index)) return -EFAULT; @@ -515,9 +556,147 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) return 0; } +static int +set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, + void *data) +{ + struct i915_context_engines_parallel_submit __user *ext = + container_of_user(base, typeof(*ext), base); + const struct set_proto_ctx_engines *set = data; + struct drm_i915_private *i915 = set->i915; + u64 flags; + int err = 0, n, i, j; + u16 slot, width, num_siblings; + struct intel_engine_cs **siblings = NULL; + intel_engine_mask_t prev_mask; + + /* FIXME: This is NIY for execlists */ + if (!(intel_uc_uses_guc_submission(&i915->gt.uc))) + return -ENODEV; + + if (get_user(slot, &ext->engine_index)) + return -EFAULT; + + if (get_user(width, &ext->width)) + return -EFAULT; + + if (get_user(num_siblings, &ext->num_siblings)) + return -EFAULT; + + if (slot >= set->num_engines) { + drm_dbg(&i915->drm, "Invalid 
placement value, %d >= %d\n", + slot, set->num_engines); + return -EINVAL; + } + + if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { + drm_dbg(&i915->drm, + "Invalid placement[%d], already occupied\n", slot); + return -EINVAL; + } + + if (get_user(flags, &ext->flags)) + return -EFAULT; + + if (flags) { + drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); + return -EINVAL; + } + + for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { + err = check_user_mbz(&ext->mbz64[n]); + if (err) + return err; + } + + if (width < 2) { + drm_dbg(&i915->drm, "Width (%d) < 2\n", width); + return -EINVAL; + } + + if (num_siblings < 1) { + drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", + num_siblings); + return -EINVAL; + } + + siblings = kmalloc_array(num_siblings * width, + sizeof(*siblings), + GFP_KERNEL); + if (!siblings) + return -ENOMEM; + + /* Create contexts / engines */ + for (i = 0; i < width; ++i) { + intel_engine_mask_t current_mask = 0; + struct i915_engine_class_instance prev_engine; + + for (j = 0; j < num_siblings; ++j) { + struct i915_engine_class_instance ci; + + n = i * num_siblings + j; + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { + err = -EFAULT; + goto out_err; + } + + siblings[n] = + intel_engine_lookup_user(i915, ci.engine_class, + ci.engine_instance); + if (!siblings[n]) { + drm_dbg(&i915->drm, + "Invalid sibling[%d]: { class:%d, inst:%d }\n", + n, ci.engine_class, ci.engine_instance); + err = -EINVAL; + goto out_err; + } + + if (n) { + if (prev_engine.engine_class != + ci.engine_class) { + drm_dbg(&i915->drm, + "Mismatched class %d, %d\n", + prev_engine.engine_class, + ci.engine_class); + err = -EINVAL; + goto out_err; + } + } + + prev_engine = ci; + current_mask |= siblings[n]->logical_mask; + } + + if (i > 0) { + if (current_mask != prev_mask << 1) { + drm_dbg(&i915->drm, + "Non contiguous logical mask 0x%x, 0x%x\n", + prev_mask, current_mask); + err = -EINVAL; + goto out_err; + } + } + prev_mask = current_mask; + } + + set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; + set->engines[slot].num_siblings = num_siblings; + set->engines[slot].width = width; + set->engines[slot].siblings = siblings; + + return 0; + +out_err: + kfree(siblings); + + return err; +} + static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, + [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = + set_proto_ctx_engines_parallel_submit, }; static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, @@ -686,6 +865,8 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, ret = -EPERM; else if (args->value) pc->user_flags |= BIT(UCONTEXT_BANNABLE); + else if (pc->uses_protected_content) + ret = -EPERM; else pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); break; @@ -693,10 +874,12 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; - else if (args->value) - pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); - else + else if (!args->value) pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); + else if (pc->uses_protected_content) + ret = -EPERM; + else + pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); break; case I915_CONTEXT_PARAM_PRIORITY: @@ -724,6 +907,11 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, args->value); break; + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: + ret = proto_context_set_protected(fpriv->dev_priv, 
pc, + args->value); + break; + case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: @@ -735,44 +923,6 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, return ret; } -static struct i915_address_space * -context_get_vm_rcu(struct i915_gem_context *ctx) -{ - GEM_BUG_ON(!rcu_access_pointer(ctx->vm)); - - do { - struct i915_address_space *vm; - - /* - * We do not allow downgrading from full-ppgtt [to a shared - * global gtt], so ctx->vm cannot become NULL. - */ - vm = rcu_dereference(ctx->vm); - if (!kref_get_unless_zero(&vm->ref)) - continue; - - /* - * This ppgtt may have be reallocated between - * the read and the kref, and reassigned to a third - * context. In order to avoid inadvertent sharing - * of this ppgtt with that third context (and not - * src), we have to confirm that we have the same - * ppgtt after passing through the strong memory - * barrier implied by a successful - * kref_get_unless_zero(). - * - * Once we have acquired the current ppgtt of ctx, - * we no longer care if it is released from ctx, as - * it cannot be reallocated elsewhere. - */ - - if (vm == rcu_access_pointer(ctx->vm)) - return rcu_pointer_handoff(vm); - - i915_vm_put(vm); - } while (1); -} - static int intel_context_set_gem(struct intel_context *ce, struct i915_gem_context *ctx, struct intel_sseu sseu) @@ -782,25 +932,18 @@ static int intel_context_set_gem(struct intel_context *ce, GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); RCU_INIT_POINTER(ce->gem_context, ctx); + GEM_BUG_ON(intel_context_is_pinned(ce)); ce->ring_size = SZ_16K; - if (rcu_access_pointer(ctx->vm)) { - struct i915_address_space *vm; - - rcu_read_lock(); - vm = context_get_vm_rcu(ctx); /* hmm */ - rcu_read_unlock(); - - i915_vm_put(ce->vm); - ce->vm = vm; - } + i915_vm_put(ce->vm); + ce->vm = i915_gem_context_get_eb_vm(ctx); if (ctx->sched.priority >= I915_PRIORITY_NORMAL && intel_engine_has_timeslices(ce->engine) && intel_engine_has_semaphores(ce->engine)) __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); - if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) && + if (CONFIG_DRM_I915_REQUEST_TIMEOUT && ctx->i915->params.request_timeout_ms) { unsigned int timeout_ms = ctx->i915->params.request_timeout_ms; @@ -814,6 +957,25 @@ static int intel_context_set_gem(struct intel_context *ce, return ret; } +static void __unpin_engines(struct i915_gem_engines *e, unsigned int count) +{ + while (count--) { + struct intel_context *ce = e->engines[count], *child; + + if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) + continue; + + for_each_child(ce, child) + intel_context_unpin(child); + intel_context_unpin(ce); + } +} + +static void unpin_engines(struct i915_gem_engines *e) +{ + __unpin_engines(e, e->num_engines); +} + static void __free_engines(struct i915_gem_engines *e, unsigned int count) { while (count--) { @@ -839,7 +1001,7 @@ static void free_engines_rcu(struct rcu_head *rcu) free_engines(engines); } -static int __i915_sw_fence_call +static int engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_gem_engines *engines = @@ -929,6 +1091,40 @@ free_engines: return err; } +static int perma_pin_contexts(struct intel_context *ce) +{ + struct intel_context *child; + int i = 0, j = 0, ret; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + ret = intel_context_pin(ce); + if (unlikely(ret)) + return ret; + + for_each_child(ce, child) { + ret = intel_context_pin(child); + if (unlikely(ret)) + goto unwind; + ++i; + } + + set_bit(CONTEXT_PERMA_PIN, 
&ce->flags); + + return 0; + +unwind: + intel_context_unpin(ce); + for_each_child(ce, child) { + if (j++ < i) + intel_context_unpin(child); + else + break; + } + + return ret; +} + static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, unsigned int num_engines, struct i915_gem_proto_engine *pe) @@ -937,8 +1133,12 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, unsigned int n; e = alloc_engines(num_engines); + if (!e) + return ERR_PTR(-ENOMEM); + e->num_engines = num_engines; + for (n = 0; n < num_engines; n++) { - struct intel_context *ce; + struct intel_context *ce, *child; int ret; switch (pe[n].type) { @@ -948,7 +1148,13 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, case I915_GEM_ENGINE_TYPE_BALANCED: ce = intel_engine_create_virtual(pe[n].siblings, - pe[n].num_siblings); + pe[n].num_siblings, 0); + break; + + case I915_GEM_ENGINE_TYPE_PARALLEL: + ce = intel_engine_create_parallel(pe[n].siblings, + pe[n].num_siblings, + pe[n].width); break; case I915_GEM_ENGINE_TYPE_INVALID: @@ -969,8 +1175,31 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, err = ERR_PTR(ret); goto free_engines; } + for_each_child(ce, child) { + ret = intel_context_set_gem(child, ctx, pe->sseu); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } + + /* + * XXX: Must be done after calling intel_context_set_gem as that + * function changes the ring size. The ring is allocated when + * the context is pinned. If the ring size is changed after + * allocation we have a mismatch of the ring size, which will cause + * the context to hang. Presumably with a bit of reordering we + * could move the perma-pin step to the backend function + * intel_engine_create_parallel. 
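Condensing the hunks above, the ordering constraint the XXX comment describes looks like this (a sketch, not the literal control flow of user_engines()):

/*
 * intel_context_set_gem() adjusts ce->ring_size, and the ring is only
 * allocated at first pin, so the perma-pin must follow every set_gem().
 */
ret = intel_context_set_gem(ce, ctx, pe->sseu);
for_each_child(ce, child) {
	if (!ret)
		ret = intel_context_set_gem(child, ctx, pe->sseu);
}
if (!ret && pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL)
	ret = perma_pin_contexts(ce); /* rings get allocated here */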
+ */ + if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) { + ret = perma_pin_contexts(ce); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } } - e->num_engines = num_engines; return e; @@ -979,13 +1208,25 @@ free_engines: return err; } -void i915_gem_context_release(struct kref *ref) +static void i915_gem_context_release_work(struct work_struct *work) { - struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); + struct i915_gem_context *ctx = container_of(work, typeof(*ctx), + release_work); + struct i915_address_space *vm; trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + if (ctx->syncobj) + drm_syncobj_put(ctx->syncobj); + + vm = ctx->vm; + if (vm) + i915_vm_put(vm); + + if (ctx->pxp_wakeref) + intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref); + mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); @@ -995,6 +1236,13 @@ void i915_gem_context_release(struct kref *ref) kfree_rcu(ctx, rcu); } +void i915_gem_context_release(struct kref *ref) +{ + struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); + + queue_work(ctx->i915->wq, &ctx->release_work); +} + static inline struct i915_gem_engines * __context_engines_static(const struct i915_gem_context *ctx) { @@ -1193,6 +1441,7 @@ static void context_close(struct i915_gem_context *ctx) /* Flush any concurrent set_engines() */ mutex_lock(&ctx->engines_mutex); + unpin_engines(__context_engines_static(ctx)); engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); i915_gem_context_set_closed(ctx); mutex_unlock(&ctx->engines_mutex); @@ -1201,12 +1450,16 @@ static void context_close(struct i915_gem_context *ctx) set_closed_name(ctx); - vm = i915_gem_context_vm(ctx); - if (vm) + vm = ctx->vm; + if (vm) { + /* i915_vm_close drops the final reference, which is a bit too + * early and could result in surprises with concurrent + * operations racing with this ctx close. Keep a full reference + * until the end. 
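Schematically, the reference dance described above (assuming the matching i915_vm_put() sits at the tail of context_close(), beyond the quoted hunk):

vm = ctx->vm;
if (vm) {
	i915_vm_get(vm);   /* extra strong reference across the close */
	i915_vm_close(vm); /* may drop what would be the final open ref */
	/* ... teardown that can still race with this vm ... */
	i915_vm_put(vm);   /* only now is the vm allowed to disappear */
}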
+ */ + i915_vm_get(vm); i915_vm_close(vm); - - if (ctx->syncobj) - drm_syncobj_put(ctx->syncobj); + } ctx->file_priv = ERR_PTR(-EBADF); @@ -1277,49 +1530,6 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state) return 0; } -static inline struct i915_gem_engines * -__context_engines_await(const struct i915_gem_context *ctx, - bool *user_engines) -{ - struct i915_gem_engines *engines; - - rcu_read_lock(); - do { - engines = rcu_dereference(ctx->engines); - GEM_BUG_ON(!engines); - - if (user_engines) - *user_engines = i915_gem_context_user_engines(ctx); - - /* successful await => strong mb */ - if (unlikely(!i915_sw_fence_await(&engines->fence))) - continue; - - if (likely(engines == rcu_access_pointer(ctx->engines))) - break; - - i915_sw_fence_complete(&engines->fence); - } while (1); - rcu_read_unlock(); - - return engines; -} - -static void -context_apply_all(struct i915_gem_context *ctx, - void (*fn)(struct intel_context *ce, void *data), - void *data) -{ - struct i915_gem_engines_iter it; - struct i915_gem_engines *e; - struct intel_context *ce; - - e = __context_engines_await(ctx, NULL); - for_each_gem_engine(ce, e, it) - fn(ce, data); - i915_sw_fence_complete(&e->fence); -} - static struct i915_gem_context * i915_gem_create_context(struct drm_i915_private *i915, const struct i915_gem_proto_context *pc) @@ -1339,6 +1549,7 @@ i915_gem_create_context(struct drm_i915_private *i915, ctx->sched = pc->sched; mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->link); + INIT_WORK(&ctx->release_work, i915_gem_context_release_work); spin_lock_init(&ctx->stale.lock); INIT_LIST_HEAD(&ctx->stale.engines); @@ -1348,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915, } else if (HAS_FULL_PPGTT(i915)) { struct i915_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(&i915->gt); + ppgtt = i915_ppgtt_create(&i915->gt, 0); if (IS_ERR(ppgtt)) { drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -1358,7 +1569,7 @@ i915_gem_create_context(struct drm_i915_private *i915, vm = &ppgtt->vm; } if (vm) { - RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm)); + ctx->vm = i915_vm_open(vm); /* i915_vm_open() takes a reference */ i915_vm_put(vm); @@ -1399,6 +1610,11 @@ i915_gem_create_context(struct drm_i915_private *i915, goto err_engines; } + if (pc->uses_protected_content) { + ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); + ctx->uses_protected_content = true; + } + trace_i915_context_create(ctx); return ctx; @@ -1470,7 +1686,7 @@ int i915_gem_context_open(struct drm_i915_private *i915, } ctx = i915_gem_create_context(i915, pc); - proto_context_close(pc); + proto_context_close(i915, pc); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err; @@ -1497,7 +1713,7 @@ void i915_gem_context_close(struct drm_file *file) unsigned long idx; xa_for_each(&file_priv->proto_context_xa, idx, pc) - proto_context_close(pc); + proto_context_close(file_priv->dev_priv, pc); xa_destroy(&file_priv->proto_context_xa); mutex_destroy(&file_priv->proto_context_lock); @@ -1526,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags) return -EINVAL; - ppgtt = i915_ppgtt_create(&i915->gt); + ppgtt = i915_ppgtt_create(&i915->gt, 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -1581,18 +1797,15 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, int err; u32 id; - if (!rcu_access_pointer(ctx->vm)) + if (!i915_gem_context_has_full_ppgtt(ctx)) return -ENODEV; - rcu_read_lock(); - vm = context_get_vm_rcu(ctx); - rcu_read_unlock(); - if (!vm) - return 
-ENODEV; + vm = ctx->vm; + GEM_BUG_ON(!vm); err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); if (err) - goto err_put; + return err; i915_vm_open(vm); @@ -1600,8 +1813,6 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, args->value = id; args->size = 0; -err_put: - i915_vm_put(vm); return err; } @@ -1769,23 +1980,11 @@ set_persistence(struct i915_gem_context *ctx, return __context_set_persistence(ctx, args->value); } -static void __apply_priority(struct intel_context *ce, void *arg) -{ - struct i915_gem_context *ctx = arg; - - if (!intel_engine_has_timeslices(ce->engine)) - return; - - if (ctx->sched.priority >= I915_PRIORITY_NORMAL && - intel_engine_has_semaphores(ce->engine)) - intel_context_set_use_semaphores(ce); - else - intel_context_clear_use_semaphores(ce); -} - static int set_priority(struct i915_gem_context *ctx, const struct drm_i915_gem_context_param *args) { + struct i915_gem_engines_iter it; + struct intel_context *ce; int err; err = validate_priority(ctx->i915, args); @@ -1793,7 +1992,27 @@ static int set_priority(struct i915_gem_context *ctx, return err; ctx->sched.priority = args->value; - context_apply_all(ctx, __apply_priority, ctx); + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_has_timeslices(ce->engine)) + continue; + + if (ctx->sched.priority >= I915_PRIORITY_NORMAL && + intel_engine_has_semaphores(ce->engine)) + intel_context_set_use_semaphores(ce); + else + intel_context_clear_use_semaphores(ce); + } + i915_gem_context_unlock_engines(ctx); + + return 0; +} + +static int get_protected(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + args->size = 0; + args->value = i915_gem_context_uses_protected_content(ctx); return 0; } @@ -1821,6 +2040,8 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = -EPERM; else if (args->value) i915_gem_context_set_bannable(ctx); + else if (i915_gem_context_uses_protected_content(ctx)) + ret = -EPERM; /* can't clear this for protected contexts */ else i915_gem_context_clear_bannable(ctx); break; @@ -1828,10 +2049,12 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; - else if (args->value) - i915_gem_context_set_recoverable(ctx); - else + else if (!args->value) i915_gem_context_clear_recoverable(ctx); + else if (i915_gem_context_uses_protected_content(ctx)) + ret = -EPERM; /* can't set this for protected contexts */ + else + i915_gem_context_set_recoverable(ctx); break; case I915_CONTEXT_PARAM_PRIORITY: @@ -1846,6 +2069,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = set_persistence(ctx, args); break; + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: @@ -1924,7 +2148,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv, old = xa_erase(&file_priv->proto_context_xa, id); GEM_BUG_ON(old != pc); - proto_context_close(pc); + proto_context_close(file_priv->dev_priv, pc); /* One for the xarray and one for the caller */ return i915_gem_context_get(ctx); @@ -2010,7 +2234,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, goto err_pc; } - proto_context_close(ext_data.pc); + proto_context_close(i915, ext_data.pc); gem_context_register(ctx, ext_data.fpriv, id); } else { ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id); @@ -2024,7 +2248,7 @@ int 
i915_gem_context_create_ioctl(struct drm_device *dev, void *data, return 0; err_pc: - proto_context_close(ext_data.pc); + proto_context_close(i915, ext_data.pc); return ret; } @@ -2055,7 +2279,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, GEM_WARN_ON(ctx && pc); if (pc) - proto_context_close(pc); + proto_context_close(file_priv->dev_priv, pc); if (ctx) context_close(ctx); @@ -2124,6 +2348,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; struct i915_gem_context *ctx; + struct i915_address_space *vm; int ret = 0; ctx = i915_gem_context_lookup(file_priv, args->ctx_id); @@ -2133,12 +2358,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, switch (args->param) { case I915_CONTEXT_PARAM_GTT_SIZE: args->size = 0; - rcu_read_lock(); - if (rcu_access_pointer(ctx->vm)) - args->value = rcu_dereference(ctx->vm)->total; - else - args->value = to_i915(dev)->ggtt.vm.total; - rcu_read_unlock(); + vm = i915_gem_context_get_eb_vm(ctx); + args->value = vm->total; + i915_vm_put(vm); + break; case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: @@ -2174,6 +2397,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->value = i915_gem_context_is_persistent(ctx); break; + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: + ret = get_protected(ctx, args); + break; + case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_ENGINES: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 18060536b0c2..babfecb17ad1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -108,6 +108,12 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx) clear_bit(CONTEXT_USER_ENGINES, &ctx->flags); } +static inline bool +i915_gem_context_uses_protected_content(const struct i915_gem_context *ctx) +{ + return ctx->uses_protected_content; +} + /* i915_gem_context.c */ void i915_gem_init__contexts(struct drm_i915_private *i915); @@ -154,17 +160,22 @@ i915_gem_context_vm(struct i915_gem_context *ctx) return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex)); } +static inline bool i915_gem_context_has_full_ppgtt(struct i915_gem_context *ctx) +{ + GEM_BUG_ON(!!ctx->vm != HAS_FULL_PPGTT(ctx->i915)); + + return !!ctx->vm; +} + static inline struct i915_address_space * -i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx) +i915_gem_context_get_eb_vm(struct i915_gem_context *ctx) { struct i915_address_space *vm; - rcu_read_lock(); - vm = rcu_dereference(ctx->vm); + vm = ctx->vm; if (!vm) vm = &ctx->i915->ggtt.vm; vm = i915_vm_get(vm); - rcu_read_unlock(); return vm; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 94c03a97cb77..282cdb8a5c5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -78,13 +78,16 @@ enum i915_gem_engine_type { /** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */ I915_GEM_ENGINE_TYPE_BALANCED, + + /** @I915_GEM_ENGINE_TYPE_PARALLEL: A parallel engine set */ + I915_GEM_ENGINE_TYPE_PARALLEL, }; /** * struct i915_gem_proto_engine - prototype engine * * This struct describes an engine that a context may contain. 
Engines - * have three types: + * have four types: * * - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they * show up as a NULL in i915_gem_engines::engines[i] and any attempt to @@ -97,6 +100,10 @@ enum i915_gem_engine_type { * * - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described by * i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings. + * + * - I915_GEM_ENGINE_TYPE_PARALLEL: A parallel submission engine set, described by + * i915_gem_proto_engine::width, i915_gem_proto_engine::num_siblings, and + * i915_gem_proto_engine::siblings. */ struct i915_gem_proto_engine { /** @type: Type of this engine */ @@ -105,10 +112,13 @@ struct i915_gem_proto_engine { /** @engine: Engine, for physical */ struct intel_engine_cs *engine; - /** @num_siblings: Number of balanced siblings */ + /** @num_siblings: Number of balanced or parallel siblings */ unsigned int num_siblings; - /** @siblings: Balanced siblings */ + /** @width: Width of each sibling */ + unsigned int width; + + /** @siblings: Balanced siblings or num_siblings * width for parallel */ struct intel_engine_cs **siblings; /** @sseu: Client-set SSEU parameters */ @@ -198,6 +208,12 @@ struct i915_gem_proto_context { /** @single_timeline: See &i915_gem_context.syncobj */ bool single_timeline; + + /** @uses_protected_content: See &i915_gem_context.uses_protected_content */ + bool uses_protected_content; + + /** @pxp_wakeref: See &i915_gem_context.pxp_wakeref */ + intel_wakeref_t pxp_wakeref; }; /** @@ -262,7 +278,7 @@ struct i915_gem_context { * In other modes, this is a NULL pointer with the expectation that * the caller uses the shared global GTT. */ - struct i915_address_space __rcu *vm; + struct i915_address_space *vm; /** * @pid: process id of creator @@ -289,6 +305,18 @@ struct i915_gem_context { struct kref ref; /** + * @release_work: + * + * Work item for deferred cleanup, since i915_gem_context_put() tends to + * be called from hardirq context. + * + * FIXME: The only real reason for this is &i915_gem_engines.fence, all + * other callers are from process context and need at most some mild + * shuffling to pull the i915_gem_context_put() call out of a spinlock. + */ + struct work_struct release_work; + + /** * @rcu: rcu_head for deferred freeing. */ struct rcu_head rcu; @@ -309,6 +337,28 @@ struct i915_gem_context { #define CONTEXT_CLOSED 0 #define CONTEXT_USER_ENGINES 1 + /** + * @uses_protected_content: context uses PXP-encrypted objects. + * + * This flag can only be set at ctx creation time and it's immutable for + * the lifetime of the context. See I915_CONTEXT_PARAM_PROTECTED_CONTENT + * in uapi/drm/i915_drm.h for more info on setting restrictions and + * expected behaviour of marked contexts. + */ + bool uses_protected_content; + + /** + * @pxp_wakeref: wakeref to keep the device awake when PXP is in use + * + * PXP sessions are invalidated when the device is suspended, which in + * turn invalidates all contexts and objects using it. To keep the + * flow simple, we keep the device awake when contexts using PXP objects + * are in use. It is expected that the userspace application only uses + * PXP when the display is on, so taking a wakeref here shouldn't worsen + * our power metrics. 
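The width/num_siblings layout documented above flattens a two-dimensional engine set into the single siblings[] array; a small sketch of the indexing convention used by set_proto_ctx_engines_parallel_submit() earlier in this patch (the helper itself is illustrative):

/*
 * siblings[] holds width rows of num_siblings engines, row-major:
 * entry (i, j) lives at siblings[i * num_siblings + j], 0 <= i < width.
 */
static struct intel_engine_cs *
parallel_sibling(struct intel_engine_cs **siblings,
		 unsigned int num_siblings,
		 unsigned int i, unsigned int j)
{
	return siblings[i * num_siblings + j];
}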
+ */ + intel_wakeref_t pxp_wakeref; + /** @mutex: guards everything that isn't engines or handles_vma */ struct mutex mutex; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c index 23fee13a3384..8955d6abcef1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_create.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c @@ -6,6 +6,7 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" +#include "pxp/intel_pxp.h" #include "i915_drv.h" #include "i915_trace.h" @@ -82,21 +83,11 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj, return 0; } -/** - * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT - * @i915: i915 private - * @size: size of the buffer, in bytes - * @placements: possible placement regions, in priority order - * @n_placements: number of possible placement regions - * - * This function is exposed primarily for selftests and does very little - * error checking. It is assumed that the set of placement regions has - * already been verified to be valid. - */ -struct drm_i915_gem_object * -__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, - struct intel_memory_region **placements, - unsigned int n_placements) +static struct drm_i915_gem_object * +__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size, + struct intel_memory_region **placements, + unsigned int n_placements, + unsigned int ext_flags) { struct intel_memory_region *mr = placements[0]; struct drm_i915_gem_object *obj; @@ -135,6 +126,9 @@ __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, GEM_BUG_ON(size != obj->base.size); + /* Add any flag set by create_ext options */ + obj->flags |= ext_flags; + trace_i915_gem_object_create(obj); return obj; @@ -145,6 +139,26 @@ object_free: return ERR_PTR(ret); } +/** + * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT + * @i915: i915 private + * @size: size of the buffer, in bytes + * @placements: possible placement regions, in priority order + * @n_placements: number of possible placement regions + * + * This function is exposed primarily for selftests and does very little + * error checking. It is assumed that the set of placement regions has + * already been verified to be valid. 
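For context, a userspace-side sketch of exercising the protected-content path added below, chaining the new extension onto DRM_IOCTL_I915_GEM_CREATE_EXT (uapi names as introduced by this series; error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

struct drm_i915_gem_create_ext_protected_content prot = {
	.base.name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT,
	.flags = 0, /* must be zero, see ext_set_protected() below */
};
struct drm_i915_gem_create_ext create = {
	.size = 4096,
	.extensions = (uintptr_t)&prot,
};

/* Fails with -ENODEV unless PXP is enabled, matching the kernel-side check. */
int ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);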
+ */ +struct drm_i915_gem_object * +__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, + struct intel_memory_region **placements, + unsigned int n_placements) +{ + return __i915_gem_object_create_user_ext(i915, size, placements, + n_placements, 0); +} + int i915_gem_dumb_create(struct drm_file *file, struct drm_device *dev, @@ -224,6 +238,7 @@ struct create_ext { struct drm_i915_private *i915; struct intel_memory_region *placements[INTEL_REGION_UNKNOWN]; unsigned int n_placements; + unsigned long flags; }; static void repr_placements(char *buf, size_t size, @@ -347,17 +362,34 @@ static int ext_set_placements(struct i915_user_extension __user *base, { struct drm_i915_gem_create_ext_memory_regions ext; - if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) - return -ENODEV; - if (copy_from_user(&ext, base, sizeof(ext))) return -EFAULT; return set_placements(&ext, data); } +static int ext_set_protected(struct i915_user_extension __user *base, void *data) +{ + struct drm_i915_gem_create_ext_protected_content ext; + struct create_ext *ext_data = data; + + if (copy_from_user(&ext, base, sizeof(ext))) + return -EFAULT; + + if (ext.flags) + return -EINVAL; + + if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp)) + return -ENODEV; + + ext_data->flags |= I915_BO_PROTECTED; + + return 0; +} + static const i915_user_extension_fn create_extensions[] = { [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements, + [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected, }; /** @@ -392,9 +424,10 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data, ext_data.n_placements = 1; } - obj = __i915_gem_object_create_user(i915, args->size, - ext_data.placements, - ext_data.n_placements); + obj = __i915_gem_object_create_user_ext(i915, args->size, + ext_data.placements, + ext_data.n_placements, + ext_data.flags); if (IS_ERR(obj)) return PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index afa34111de02..1b526039a60d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -7,11 +7,16 @@ #include <linux/dma-buf.h> #include <linux/highmem.h> #include <linux/dma-resv.h> +#include <linux/module.h> + +#include <asm/smp.h> #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" +MODULE_IMPORT_NS(DMA_BUF); + I915_SELFTEST_DECLARE(static bool force_different_devices;) static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) @@ -232,6 +237,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags) static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *pages; unsigned int sg_page_sizes; @@ -242,8 +248,22 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) if (IS_ERR(pages)) return PTR_ERR(pages); - sg_page_sizes = i915_sg_dma_sizes(pages->sgl); + /* + * DG1 is special here since it still snoops transactions even with + * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We + * might need to revisit this as we add new discrete platforms. + * + * XXX: Consider doing a vmap flush or something, where possible. + * Currently we just do a heavy handed wbinvd_on_all_cpus() here since + * the underlying sg_table might not even point to struct pages, so we + * can't just call drm_clflush_sg or similar, like we do elsewhere in + * the driver. 
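Condensed, the coherency decision that comment justifies is the single predicate used by the hunk that follows (the helper name is made up for illustration):

/*
 * Flush CPU caches with wbinvd_on_all_cpus() whenever snooping cannot be
 * relied on: the object may bypass the LLC, or the platform has no LLC
 * and is not DG1 (which snoops even with CACHE_NONE).
 */
static bool dmabuf_pages_need_wbinvd(struct drm_i915_gem_object *obj,
				     struct drm_i915_private *i915)
{
	return i915_gem_object_can_bypass_llc(obj) ||
	       (!HAS_LLC(i915) && !IS_DG1(i915));
}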
+ */ + if (i915_gem_object_can_bypass_llc(obj) || + (!HAS_LLC(i915) && !IS_DG1(i915))) + wbinvd_on_all_cpus(); + sg_page_sizes = i915_sg_dma_sizes(pages->sgl); __i915_gem_object_set_pages(obj, pages, sg_page_sizes); return 0; @@ -301,7 +321,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, } drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, 0); + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, + I915_BO_ALLOC_USER); obj->base.import_attach = attach; obj->base.resv = dma_buf->resv; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index b684a62bf3b0..26532c07d467 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -18,10 +18,32 @@ static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (IS_DGFX(i915)) + return false; + return !(obj->cache_level == I915_CACHE_NONE || obj->cache_level == I915_CACHE_WT); } +bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (obj->cache_dirty) + return false; + + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) + return true; + + if (IS_DGFX(i915)) + return false; + + /* Currently in use by HW (display engine)? Keep flushed. */ + return i915_gem_object_is_framebuffer(obj); +} + static void flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 1aa249908b64..60ee60f7bb09 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -21,12 +21,15 @@ #include "gt/intel_gt_pm.h" #include "gt/intel_ring.h" +#include "pxp/intel_pxp.h" + #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" #include "i915_gem_ioctls.h" #include "i915_trace.h" #include "i915_user_extensions.h" +#include "i915_vma_snapshot.h" struct eb_vma { struct i915_vma *vma; @@ -244,17 +247,25 @@ struct i915_execbuffer { struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */ struct eb_vma *vma; - struct intel_engine_cs *engine; /** engine to queue the request to */ + struct intel_gt *gt; /* gt for the execbuf */ struct intel_context *context; /* logical state for the request */ struct i915_gem_context *gem_context; /** caller's context */ - struct i915_request *request; /** our request to build */ - struct eb_vma *batch; /** identity of the batch obj/vma */ + /** our requests to build */ + struct i915_request *requests[MAX_ENGINE_INSTANCE + 1]; + /** identity of the batch obj/vma */ + struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1]; struct i915_vma *trampoline; /** trampoline used for chaining */ + /** used for excl fence in dma_resv objects when > 1 BB submitted */ + struct dma_fence *composite_fence; + /** actual size of execobj[] as we may extend it for the cmdparser */ unsigned int buffer_count; + /* number of batches in execbuf IOCTL */ + unsigned int num_batches; + /** list of vma not yet bound during reservation phase */ struct list_head unbound; @@ -281,7 +292,8 @@ struct i915_execbuffer { u64 invalid_flags; /** Set of execobj.flags that are invalid */ - u64 batch_len; /** Length of batch within object */ + /** Length of batch within object */ + u64 
batch_len[MAX_ENGINE_INSTANCE + 1]; u32 batch_start_offset; /** Location within object of batch */ u32 batch_flags; /** Flags composed for emit_bb_start() */ struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */ @@ -296,17 +308,20 @@ struct i915_execbuffer { struct eb_fence *fences; unsigned long num_fences; +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1]; +#endif }; static int eb_parse(struct i915_execbuffer *eb); -static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, - bool throttle); +static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle); static void eb_unpin_engine(struct i915_execbuffer *eb); +static void eb_capture_release(struct i915_execbuffer *eb); static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { - return intel_engine_requires_cmd_parser(eb->engine) || - (intel_engine_using_cmd_parser(eb->engine) && + return intel_engine_requires_cmd_parser(eb->context->engine) || + (intel_engine_using_cmd_parser(eb->context->engine) && eb->args->batch_len); } @@ -533,11 +548,21 @@ eb_validate_vma(struct i915_execbuffer *eb, return 0; } -static void +static inline bool +is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx) +{ + return eb->args->flags & I915_EXEC_BATCH_FIRST ? + buffer_idx < eb->num_batches : + buffer_idx >= eb->args->buffer_count - eb->num_batches; +} + +static int eb_add_vma(struct i915_execbuffer *eb, - unsigned int i, unsigned batch_idx, + unsigned int *current_batch, + unsigned int i, struct i915_vma *vma) { + struct drm_i915_private *i915 = eb->i915; struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct eb_vma *ev = &eb->vma[i]; @@ -564,15 +589,43 @@ eb_add_vma(struct i915_execbuffer *eb, * Note that actual hangs have only been observed on gen7, but for * paranoia do it everywhere. */ - if (i == batch_idx) { + if (is_batch_buffer(eb, i)) { if (entry->relocation_count && !(ev->flags & EXEC_OBJECT_PINNED)) ev->flags |= __EXEC_OBJECT_NEEDS_BIAS; if (eb->reloc_cache.has_fence) ev->flags |= EXEC_OBJECT_NEEDS_FENCE; - eb->batch = ev; + eb->batches[*current_batch] = ev; + + if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) { + drm_dbg(&i915->drm, + "Attempting to use self-modifying batch buffer\n"); + return -EINVAL; + } + + if (range_overflows_t(u64, + eb->batch_start_offset, + eb->args->batch_len, + ev->vma->size)) { + drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); + return -EINVAL; + } + + if (eb->args->batch_len == 0) + eb->batch_len[*current_batch] = ev->vma->size - + eb->batch_start_offset; + else + eb->batch_len[*current_batch] = eb->args->batch_len; + if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! 
*/ + drm_dbg(&i915->drm, "Invalid batch length\n"); + return -EINVAL; + } + + ++*current_batch; } + + return 0; } static inline int use_cpu_reloc(const struct reloc_cache *cache, @@ -716,14 +769,6 @@ static int eb_reserve(struct i915_execbuffer *eb) } while (1); } -static unsigned int eb_batch_index(const struct i915_execbuffer *eb) -{ - if (eb->args->flags & I915_EXEC_BATCH_FIRST) - return 0; - else - return eb->buffer_count - 1; -} - static int eb_select_context(struct i915_execbuffer *eb) { struct i915_gem_context *ctx; @@ -733,7 +778,7 @@ static int eb_select_context(struct i915_execbuffer *eb) return PTR_ERR(ctx); eb->gem_context = ctx; - if (rcu_access_pointer(ctx->vm)) + if (i915_gem_context_has_full_ppgtt(ctx)) eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; return 0; @@ -759,11 +804,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb, /* Check that the context hasn't been closed in the meantime */ err = -EINTR; if (!mutex_lock_interruptible(&ctx->lut_mutex)) { - struct i915_address_space *vm = rcu_access_pointer(ctx->vm); - - if (unlikely(vm && vma->vm != vm)) - err = -EAGAIN; /* user racing with ctx set-vm */ - else if (likely(!i915_gem_context_is_closed(ctx))) + if (likely(!i915_gem_context_is_closed(ctx))) err = radix_tree_insert(&ctx->handles_vma, handle, vma); else err = -ENOENT; @@ -814,6 +855,22 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) if (unlikely(!obj)) return ERR_PTR(-ENOENT); + /* + * If the user has opted-in for protected-object tracking, make + * sure the object encryption can be used. + * We only need to do this when the object is first used with + * this context, because the context itself will be banned when + * the protected objects become invalid. + */ + if (i915_gem_context_uses_protected_content(eb->gem_context) && + i915_gem_object_is_protected(obj)) { + err = intel_pxp_key_check(&vm->gt->pxp, obj, true); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + } + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { i915_gem_object_put(obj); @@ -832,9 +889,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) static int eb_lookup_vmas(struct i915_execbuffer *eb) { - struct drm_i915_private *i915 = eb->i915; - unsigned int batch = eb_batch_index(eb); - unsigned int i; + unsigned int i, current_batch = 0; int err = 0; INIT_LIST_HEAD(&eb->relocs); @@ -854,7 +909,9 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) goto err; } - eb_add_vma(eb, i, batch, vma); + err = eb_add_vma(eb, ¤t_batch, i, vma); + if (err) + return err; if (i915_gem_object_is_userptr(vma->obj)) { err = i915_gem_object_userptr_submit_init(vma->obj); @@ -877,26 +934,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) } } - if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) { - drm_dbg(&i915->drm, - "Attempting to use self-modifying batch buffer\n"); - return -EINVAL; - } - - if (range_overflows_t(u64, - eb->batch_start_offset, eb->batch_len, - eb->batch->vma->size)) { - drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); - return -EINVAL; - } - - if (eb->batch_len == 0) - eb->batch_len = eb->batch->vma->size - eb->batch_start_offset; - if (unlikely(eb->batch_len == 0)) { /* impossible! 
*/ - drm_dbg(&i915->drm, "Invalid batch length\n"); - return -EINVAL; - } - return 0; err: @@ -958,7 +995,7 @@ static int eb_validate_vmas(struct i915_execbuffer *eb) } if (!(ev->flags & EXEC_OBJECT_WRITE)) { - err = dma_resv_reserve_shared(vma->resv, 1); + err = dma_resv_reserve_shared(vma->obj->base.resv, 1); if (err) return err; } @@ -1011,6 +1048,7 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final) i915_vma_put(vma); } + eb_capture_release(eb); eb_unpin_engine(eb); } @@ -1629,8 +1667,7 @@ static int eb_reinit_userptr(struct i915_execbuffer *eb) return 0; } -static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb, - struct i915_request *rq) +static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb) { bool have_copy = false; struct eb_vma *ev; @@ -1646,21 +1683,6 @@ repeat: eb_release_vmas(eb, false); i915_gem_ww_ctx_fini(&eb->ww); - if (rq) { - /* nonblocking is always false */ - if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT) < 0) { - i915_request_put(rq); - rq = NULL; - - err = -EINTR; - goto err_relock; - } - - i915_request_put(rq); - rq = NULL; - } - /* * We take 3 passes through the slowpatch. * @@ -1687,28 +1709,21 @@ repeat: if (!err) err = eb_reinit_userptr(eb); -err_relock: i915_gem_ww_ctx_init(&eb->ww, true); if (err) goto out; /* reacquire the objects */ repeat_validate: - rq = eb_pin_engine(eb, false); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - rq = NULL; + err = eb_pin_engine(eb, false); + if (err) goto err; - } - - /* We didn't throttle, should be NULL */ - GEM_WARN_ON(rq); err = eb_validate_vmas(eb); if (err) goto err; - GEM_BUG_ON(!eb->batch); + GEM_BUG_ON(!eb->batches[0]); list_for_each_entry(ev, &eb->relocs, reloc_link) { if (!have_copy) { @@ -1772,46 +1787,23 @@ out: } } - if (rq) - i915_request_put(rq); - return err; } static int eb_relocate_parse(struct i915_execbuffer *eb) { int err; - struct i915_request *rq = NULL; bool throttle = true; retry: - rq = eb_pin_engine(eb, throttle); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - rq = NULL; + err = eb_pin_engine(eb, throttle); + if (err) { if (err != -EDEADLK) return err; goto err; } - if (rq) { - bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; - - /* Need to drop all locks now for throttling, take slowpath */ - err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0); - if (err == -ETIME) { - if (nonblock) { - err = -EWOULDBLOCK; - i915_request_put(rq); - goto err; - } - goto slow; - } - i915_request_put(rq); - rq = NULL; - } - /* only throttle once, even if we didn't need to throttle */ throttle = false; @@ -1851,7 +1843,7 @@ err: return err; slow: - err = eb_relocate_parse_slow(eb, rq); + err = eb_relocate_parse_slow(eb); if (err) /* * If the user expects the execobject.offset and @@ -1865,30 +1857,142 @@ slow: return err; } -static int eb_move_to_gpu(struct i915_execbuffer *eb) +/* + * Using two helper loops for the order of which requests / batches are created + * and added the to backend. Requests are created in order from the parent to + * the last child. Requests are added in the reverse order, from the last child + * to parent. This is done for locking reasons as the timeline lock is acquired + * during request creation and released when the request is added to the + * backend. To make lockdep happy (see intel_context_timeline_lock) this must be + * the ordering. 
+ */ +#define for_each_batch_create_order(_eb, _i) \ + for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i)) +#define for_each_batch_add_order(_eb, _i) \ + BUILD_BUG_ON(!typecheck(int, _i)); \ + for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i)) + +static struct i915_request * +eb_find_first_request_added(struct i915_execbuffer *eb) +{ + int i; + + for_each_batch_add_order(eb, i) + if (eb->requests[i]) + return eb->requests[i]; + + GEM_BUG_ON("Request not found"); + + return NULL; +} + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */ +static void eb_capture_stage(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; - unsigned int i = count; - int err = 0; + unsigned int i = count, j; + struct i915_vma_snapshot *vsnap; while (i--) { struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; unsigned int flags = ev->flags; - struct drm_i915_gem_object *obj = vma->obj; - assert_vma_held(vma); + if (!(flags & EXEC_OBJECT_CAPTURE)) + continue; + + vsnap = i915_vma_snapshot_alloc(GFP_KERNEL); + if (!vsnap) + continue; - if (flags & EXEC_OBJECT_CAPTURE) { + i915_vma_snapshot_init(vsnap, vma, "user"); + for_each_batch_create_order(eb, j) { struct i915_capture_list *capture; capture = kmalloc(sizeof(*capture), GFP_KERNEL); - if (capture) { - capture->next = eb->request->capture_list; - capture->vma = vma; - eb->request->capture_list = capture; - } + if (!capture) + continue; + + capture->next = eb->capture_lists[j]; + capture->vma_snapshot = i915_vma_snapshot_get(vsnap); + eb->capture_lists[j] = capture; } + i915_vma_snapshot_put(vsnap); + } +} + +/* Commit once we're in the critical path */ +static void eb_capture_commit(struct i915_execbuffer *eb) +{ + unsigned int j; + + for_each_batch_create_order(eb, j) { + struct i915_request *rq = eb->requests[j]; + + if (!rq) + break; + + rq->capture_list = eb->capture_lists[j]; + eb->capture_lists[j] = NULL; + } +} + +/* + * Release anything that didn't get committed due to errors. + * The capture_list will otherwise be freed at request retire. + */ +static void eb_capture_release(struct i915_execbuffer *eb) +{ + unsigned int j; + + for_each_batch_create_order(eb, j) { + if (eb->capture_lists[j]) { + i915_request_free_capture_list(eb->capture_lists[j]); + eb->capture_lists[j] = NULL; + } + } +} + +static void eb_capture_list_clear(struct i915_execbuffer *eb) +{ + memset(eb->capture_lists, 0, sizeof(eb->capture_lists)); +} + +#else + +static void eb_capture_stage(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_commit(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_release(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_list_clear(struct i915_execbuffer *eb) +{ +} + +#endif + +static int eb_move_to_gpu(struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i = count; + int err = 0, j; + + while (i--) { + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; + unsigned int flags = ev->flags; + struct drm_i915_gem_object *obj = vma->obj; + + assert_vma_held(vma); /* * If the GPU is not _reading_ through the CPU cache, we need @@ -1901,20 +2005,43 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) * but gcc's optimiser doesn't handle that as well and emits * two jumps instead of one. Maybe one day... 
+ * + * FIXME: There is also sync flushing in set_pages(), which + * serves a different purpose(some of the time at least). + * + * We should consider: + * + * 1. Rip out the async flush code. + * + * 2. Or make the sync flushing use the async clflush path + * using mandatory fences underneath. Currently the below + * async flush happens after we bind the object. */ if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { if (i915_gem_clflush_object(obj, 0)) flags &= ~EXEC_OBJECT_ASYNC; } + /* We only need to await on the first request */ if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { err = i915_request_await_object - (eb->request, obj, flags & EXEC_OBJECT_WRITE); + (eb_find_first_request_added(eb), obj, + flags & EXEC_OBJECT_WRITE); } - if (err == 0) - err = i915_vma_move_to_active(vma, eb->request, - flags | __EXEC_OBJECT_NO_RESERVE); + for_each_batch_add_order(eb, j) { + if (err) + break; + if (!eb->requests[j]) + continue; + + err = _i915_vma_move_to_active(vma, eb->requests[j], + j ? NULL : + eb->composite_fence ? + eb->composite_fence : + &eb->requests[j]->fence, + flags | __EXEC_OBJECT_NO_RESERVE); + } } #ifdef CONFIG_MMU_NOTIFIER @@ -1945,11 +2072,18 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) goto err_skip; /* Unconditionally flush any chipset caches (for streaming writes). */ - intel_gt_chipset_flush(eb->engine->gt); + intel_gt_chipset_flush(eb->gt); + eb_capture_commit(eb); + return 0; err_skip: - i915_request_set_error_once(eb->request, err); + for_each_batch_create_order(eb, j) { + if (!eb->requests[j]) + break; + + i915_request_set_error_once(eb->requests[j], err); + } return err; } @@ -2044,14 +2178,17 @@ static int eb_parse(struct i915_execbuffer *eb) int err; if (!eb_use_cmdparser(eb)) { - batch = eb_dispatch_secure(eb, eb->batch->vma); + batch = eb_dispatch_secure(eb, eb->batches[0]->vma); if (IS_ERR(batch)) return PTR_ERR(batch); goto secure_batch; } - len = eb->batch_len; + if (intel_context_is_parallel(eb->context)) + return -EINVAL; + + len = eb->batch_len[0]; if (!CMDPARSER_USES_GGTT(eb->i915)) { /* * ppGTT backed shadow buffers must be mapped RO, to prevent @@ -2065,11 +2202,11 @@ static int eb_parse(struct i915_execbuffer *eb) } else { len += I915_CMD_PARSER_TRAMPOLINE_SIZE; } - if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */ + if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */ return -EINVAL; if (!pool) { - pool = intel_gt_get_buffer_pool(eb->engine->gt, len, + pool = intel_gt_get_buffer_pool(eb->gt, len, I915_MAP_WB); if (IS_ERR(pool)) return PTR_ERR(pool); @@ -2094,7 +2231,7 @@ static int eb_parse(struct i915_execbuffer *eb) trampoline = shadow; shadow = shadow_batch_pin(eb, pool->obj, - &eb->engine->gt->ggtt->vm, + &eb->gt->ggtt->vm, PIN_GLOBAL); if (IS_ERR(shadow)) { err = PTR_ERR(shadow); @@ -2112,30 +2249,33 @@ static int eb_parse(struct i915_execbuffer *eb) goto err_trampoline; } - err = dma_resv_reserve_shared(shadow->resv, 1); + err = dma_resv_reserve_shared(shadow->obj->base.resv, 1); if (err) goto err_trampoline; - err = intel_engine_cmd_parser(eb->engine, - eb->batch->vma, + err = intel_engine_cmd_parser(eb->context->engine, + eb->batches[0]->vma, eb->batch_start_offset, - eb->batch_len, + eb->batch_len[0], shadow, trampoline); if (err) goto err_unpin_batch; - eb->batch = &eb->vma[eb->buffer_count++]; - eb->batch->vma = i915_vma_get(shadow); - eb->batch->flags = __EXEC_OBJECT_HAS_PIN; + eb->batches[0] = &eb->vma[eb->buffer_count++]; + eb->batches[0]->vma = i915_vma_get(shadow); + 
eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; eb->trampoline = trampoline; eb->batch_start_offset = 0; secure_batch: if (batch) { - eb->batch = &eb->vma[eb->buffer_count++]; - eb->batch->flags = __EXEC_OBJECT_HAS_PIN; - eb->batch->vma = i915_vma_get(batch); + if (intel_context_is_parallel(eb->context)) + return -EINVAL; + + eb->batches[0] = &eb->vma[eb->buffer_count++]; + eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN; + eb->batches[0]->vma = i915_vma_get(batch); } return 0; @@ -2151,19 +2291,18 @@ err: return err; } -static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) +static int eb_request_submit(struct i915_execbuffer *eb, + struct i915_request *rq, + struct i915_vma *batch, + u64 batch_len) { int err; - if (intel_context_nopreempt(eb->context)) - __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); - - err = eb_move_to_gpu(eb); - if (err) - return err; + if (intel_context_nopreempt(rq->context)) + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { - err = i915_reset_gen7_sol_offsets(eb->request); + err = i915_reset_gen7_sol_offsets(rq); if (err) return err; } @@ -2174,26 +2313,26 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) * allows us to determine if the batch is still waiting on the GPU * or actually running by checking the breadcrumb. */ - if (eb->engine->emit_init_breadcrumb) { - err = eb->engine->emit_init_breadcrumb(eb->request); + if (rq->context->engine->emit_init_breadcrumb) { + err = rq->context->engine->emit_init_breadcrumb(rq); if (err) return err; } - err = eb->engine->emit_bb_start(eb->request, - batch->node.start + - eb->batch_start_offset, - eb->batch_len, - eb->batch_flags); + err = rq->context->engine->emit_bb_start(rq, + batch->node.start + + eb->batch_start_offset, + batch_len, + eb->batch_flags); if (err) return err; if (eb->trampoline) { + GEM_BUG_ON(intel_context_is_parallel(rq->context)); GEM_BUG_ON(eb->batch_start_offset); - err = eb->engine->emit_bb_start(eb->request, - eb->trampoline->node.start + - eb->batch_len, - 0, 0); + err = rq->context->engine->emit_bb_start(rq, + eb->trampoline->node.start + + batch_len, 0, 0); if (err) return err; } @@ -2201,6 +2340,27 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) return 0; } +static int eb_submit(struct i915_execbuffer *eb) +{ + unsigned int i; + int err; + + err = eb_move_to_gpu(eb); + + for_each_batch_create_order(eb, i) { + if (!eb->requests[i]) + break; + + trace_i915_request_queue(eb->requests[i], eb->batch_flags); + if (!err) + err = eb_request_submit(eb, eb->requests[i], + eb->batches[i]->vma, + eb->batch_len[i]); + } + + return err; +} + static int num_vcs_engines(const struct drm_i915_private *i915) { return hweight_long(VDBOX_MASK(&i915->gt)); @@ -2266,26 +2426,11 @@ static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel return i915_request_get(rq); } -static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle) +static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce, + bool throttle) { - struct intel_context *ce = eb->context; struct intel_timeline *tl; struct i915_request *rq = NULL; - int err; - - GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); - - if (unlikely(intel_context_is_banned(ce))) - return ERR_PTR(-EIO); - - /* - * Pinning the contexts may generate requests in order to acquire - * GGTT space, so do this first before we reserve a seqno for - * ourselves. 
- */ - err = intel_context_pin_ww(ce, &eb->ww); - if (err) - return ERR_PTR(err); /* * Take a local wakeref for preparing to dispatch the execbuf as @@ -2296,33 +2441,108 @@ static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throt * taken on the engine, and the parent device. */ tl = intel_context_timeline_lock(ce); - if (IS_ERR(tl)) { - intel_context_unpin(ce); - return ERR_CAST(tl); - } + if (IS_ERR(tl)) + return PTR_ERR(tl); intel_context_enter(ce); if (throttle) rq = eb_throttle(eb, ce); intel_context_timeline_unlock(tl); + if (rq) { + bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; + long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT; + + if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, + timeout) < 0) { + i915_request_put(rq); + + tl = intel_context_timeline_lock(ce); + intel_context_exit(ce); + intel_context_timeline_unlock(tl); + + if (nonblock) + return -EWOULDBLOCK; + else + return -EINTR; + } + i915_request_put(rq); + } + + return 0; +} + +static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle) +{ + struct intel_context *ce = eb->context, *child; + int err; + int i = 0, j = 0; + + GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED); + + if (unlikely(intel_context_is_banned(ce))) + return -EIO; + + /* + * Pinning the contexts may generate requests in order to acquire + * GGTT space, so do this first before we reserve a seqno for + * ourselves. + */ + err = intel_context_pin_ww(ce, &eb->ww); + if (err) + return err; + for_each_child(ce, child) { + err = intel_context_pin_ww(child, &eb->ww); + GEM_BUG_ON(err); /* perma-pinned should incr a counter */ + } + + for_each_child(ce, child) { + err = eb_pin_timeline(eb, child, throttle); + if (err) + goto unwind; + ++i; + } + err = eb_pin_timeline(eb, ce, throttle); + if (err) + goto unwind; + eb->args->flags |= __EXEC_ENGINE_PINNED; - return rq; + return 0; + +unwind: + for_each_child(ce, child) { + if (j++ < i) { + mutex_lock(&child->timeline->mutex); + intel_context_exit(child); + mutex_unlock(&child->timeline->mutex); + } + } + for_each_child(ce, child) + intel_context_unpin(child); + intel_context_unpin(ce); + return err; } static void eb_unpin_engine(struct i915_execbuffer *eb) { - struct intel_context *ce = eb->context; - struct intel_timeline *tl = ce->timeline; + struct intel_context *ce = eb->context, *child; if (!(eb->args->flags & __EXEC_ENGINE_PINNED)) return; eb->args->flags &= ~__EXEC_ENGINE_PINNED; - mutex_lock(&tl->mutex); + for_each_child(ce, child) { + mutex_lock(&child->timeline->mutex); + intel_context_exit(child); + mutex_unlock(&child->timeline->mutex); + + intel_context_unpin(child); + } + + mutex_lock(&ce->timeline->mutex); intel_context_exit(ce); - mutex_unlock(&tl->mutex); + mutex_unlock(&ce->timeline->mutex); intel_context_unpin(ce); } @@ -2373,7 +2593,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb) static int eb_select_engine(struct i915_execbuffer *eb) { - struct intel_context *ce; + struct intel_context *ce, *child; unsigned int idx; int err; @@ -2386,6 +2606,20 @@ eb_select_engine(struct i915_execbuffer *eb) if (IS_ERR(ce)) return PTR_ERR(ce); + if (intel_context_is_parallel(ce)) { + if (eb->buffer_count < ce->parallel.number_children + 1) { + intel_context_put(ce); + return -EINVAL; + } + if (eb->batch_start_offset || eb->args->batch_len) { + intel_context_put(ce); + return -EINVAL; + } + } + eb->num_batches = ce->parallel.number_children + 1; + + for_each_child(ce, child) + intel_context_get(child); intel_gt_pm_get(ce->engine->gt); if 
(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { @@ -2393,6 +2627,13 @@ eb_select_engine(struct i915_execbuffer *eb) if (err) goto err; } + for_each_child(ce, child) { + if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) { + err = intel_context_alloc_state(child); + if (err) + goto err; + } + } /* * ABI: Before userspace accesses the GPU (e.g. execbuffer), report @@ -2403,7 +2644,7 @@ eb_select_engine(struct i915_execbuffer *eb) goto err; eb->context = ce; - eb->engine = ce->engine; + eb->gt = ce->engine->gt; /* * Make sure engine pool stays alive even if we call intel_context_put @@ -2414,6 +2655,8 @@ eb_select_engine(struct i915_execbuffer *eb) err: intel_gt_pm_put(ce->engine->gt); + for_each_child(ce, child) + intel_context_put(child); intel_context_put(ce); return err; } @@ -2421,7 +2664,11 @@ err: static void eb_put_engine(struct i915_execbuffer *eb) { - intel_gt_pm_put(eb->engine->gt); + struct intel_context *child; + + intel_gt_pm_put(eb->gt); + for_each_child(eb->context, child) + intel_context_put(child); intel_context_put(eb->context); } @@ -2644,7 +2891,8 @@ static void put_fence_array(struct eb_fence *fences, int num_fences) } static int -await_fence_array(struct i915_execbuffer *eb) +await_fence_array(struct i915_execbuffer *eb, + struct i915_request *rq) { unsigned int n; int err; @@ -2658,8 +2906,7 @@ await_fence_array(struct i915_execbuffer *eb) if (!eb->fences[n].dma_fence) continue; - err = i915_request_await_dma_fence(eb->request, - eb->fences[n].dma_fence); + err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence); if (err < 0) return err; } @@ -2667,9 +2914,9 @@ await_fence_array(struct i915_execbuffer *eb) return 0; } -static void signal_fence_array(const struct i915_execbuffer *eb) +static void signal_fence_array(const struct i915_execbuffer *eb, + struct dma_fence * const fence) { - struct dma_fence * const fence = &eb->request->fence; unsigned int n; for (n = 0; n < eb->num_fences; n++) { @@ -2717,9 +2964,9 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end) break; } -static int eb_request_add(struct i915_execbuffer *eb, int err) +static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq, + int err, bool last_parallel) { - struct i915_request *rq = eb->request; struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_sched_attr attr = {}; struct i915_request *prev; @@ -2741,6 +2988,17 @@ static int eb_request_add(struct i915_execbuffer *eb, int err) err = -ENOENT; /* override any transient errors */ } + if (intel_context_is_parallel(eb->context)) { + if (err) { + __i915_request_skip(rq); + set_bit(I915_FENCE_FLAG_SKIP_PARALLEL, + &rq->fence.flags); + } + if (last_parallel) + set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, + &rq->fence.flags); + } + __i915_request_queue(rq, &attr); /* Try to clean up the client's timeline after submitting the request */ @@ -2752,6 +3010,25 @@ static int eb_request_add(struct i915_execbuffer *eb, int err) return err; } +static int eb_requests_add(struct i915_execbuffer *eb, int err) +{ + int i; + + /* + * We iterate in reverse order of creation to release timeline mutexes in + * the same order.
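+ *
+ * A sketch of the nesting this preserves for a parent with two children
+ * (each timeline mutex is taken at request creation and dropped as the
+ * request is added):
+ *
+ *	create:	lock(parent) -> lock(child0) -> lock(child1)
+ *	add:	unlock(child1) -> unlock(child0) -> unlock(parent)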
+ */ + for_each_batch_add_order(eb, i) { + struct i915_request *rq = eb->requests[i]; + + if (!rq) + continue; + err |= eb_request_add(eb, rq, err, i == 0); + } + + return err; +} + static const i915_user_extension_fn execbuf_extensions[] = { [DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences, }; @@ -2778,6 +3055,186 @@ parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args, eb); } +static void eb_requests_get(struct i915_execbuffer *eb) +{ + unsigned int i; + + for_each_batch_create_order(eb, i) { + if (!eb->requests[i]) + break; + + i915_request_get(eb->requests[i]); + } +} + +static void eb_requests_put(struct i915_execbuffer *eb) +{ + unsigned int i; + + for_each_batch_create_order(eb, i) { + if (!eb->requests[i]) + break; + + i915_request_put(eb->requests[i]); + } +} + +static struct sync_file * +eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd) +{ + struct sync_file *out_fence = NULL; + struct dma_fence_array *fence_array; + struct dma_fence **fences; + unsigned int i; + + GEM_BUG_ON(!intel_context_is_parent(eb->context)); + + fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL); + if (!fences) + return ERR_PTR(-ENOMEM); + + for_each_batch_create_order(eb, i) { + fences[i] = &eb->requests[i]->fence; + __set_bit(I915_FENCE_FLAG_COMPOSITE, + &eb->requests[i]->fence.flags); + } + + fence_array = dma_fence_array_create(eb->num_batches, + fences, + eb->context->parallel.fence_context, + eb->context->parallel.seqno, + false); + if (!fence_array) { + kfree(fences); + return ERR_PTR(-ENOMEM); + } + + /* Move ownership to the dma_fence_array created above */ + for_each_batch_create_order(eb, i) + dma_fence_get(fences[i]); + + if (out_fence_fd != -1) { + out_fence = sync_file_create(&fence_array->base); + /* sync_file now owns fence_array, drop creation ref */ + dma_fence_put(&fence_array->base); + if (!out_fence) + return ERR_PTR(-ENOMEM); + } + + eb->composite_fence = &fence_array->base; + + return out_fence; +} + +static struct sync_file * +eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq, + struct dma_fence *in_fence, int out_fence_fd) +{ + struct sync_file *out_fence = NULL; + int err; + + if (unlikely(eb->gem_context->syncobj)) { + struct dma_fence *fence; + + fence = drm_syncobj_fence_get(eb->gem_context->syncobj); + err = i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + if (err) + return ERR_PTR(err); + } + + if (in_fence) { + if (eb->args->flags & I915_EXEC_FENCE_SUBMIT) + err = i915_request_await_execution(rq, in_fence); + else + err = i915_request_await_dma_fence(rq, in_fence); + if (err < 0) + return ERR_PTR(err); + } + + if (eb->fences) { + err = await_fence_array(eb, rq); + if (err) + return ERR_PTR(err); + } + + if (intel_context_is_parallel(eb->context)) { + out_fence = eb_composite_fence_create(eb, out_fence_fd); + if (IS_ERR(out_fence)) + return ERR_PTR(-ENOMEM); + } else if (out_fence_fd != -1) { + out_fence = sync_file_create(&rq->fence); + if (!out_fence) + return ERR_PTR(-ENOMEM); + } + + return out_fence; +} + +static struct intel_context * +eb_find_context(struct i915_execbuffer *eb, unsigned int context_number) +{ + struct intel_context *child; + + if (likely(context_number == 0)) + return eb->context; + + for_each_child(eb->context, child) + if (!--context_number) + return child; + + GEM_BUG_ON("Context not found"); + + return NULL; +} + +static struct sync_file * +eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, + int out_fence_fd) +{ + struct
sync_file *out_fence = NULL; + unsigned int i; + + for_each_batch_create_order(eb, i) { + /* Allocate a request for this batch buffer nice and early. */ + eb->requests[i] = i915_request_create(eb_find_context(eb, i)); + if (IS_ERR(eb->requests[i])) { + out_fence = ERR_CAST(eb->requests[i]); + eb->requests[i] = NULL; + return out_fence; + } + + /* + * Only the first request added (committed to backend) has to + * take the in-fences into account as all subsequent requests + * will have fences inserted in between them. + */ + if (i + 1 == eb->num_batches) { + out_fence = eb_fences_add(eb, eb->requests[i], + in_fence, out_fence_fd); + if (IS_ERR(out_fence)) + return out_fence; + } + + /* + * Not really on stack, but we don't want to call + * kfree on the batch_snapshot when we put it, so use the + * _onstack interface. + */ + if (eb->batches[i]->vma) + i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot, + eb->batches[i]->vma, + "batch"); + if (eb->batch_pool) { + GEM_BUG_ON(intel_context_is_parallel(eb->context)); + intel_gt_buffer_pool_mark_active(eb->batch_pool, + eb->requests[i]); + } + } + + return out_fence; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, struct drm_file *file, @@ -2788,7 +3245,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct i915_execbuffer eb; struct dma_fence *in_fence = NULL; struct sync_file *out_fence = NULL; - struct i915_vma *batch; int out_fence_fd = -1; int err; @@ -2812,12 +3268,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.buffer_count = args->buffer_count; eb.batch_start_offset = args->batch_start_offset; - eb.batch_len = args->batch_len; eb.trampoline = NULL; eb.fences = NULL; eb.num_fences = 0; + eb_capture_list_clear(&eb); + + memset(eb.requests, 0, sizeof(struct i915_request *) * + ARRAY_SIZE(eb.requests)); + eb.composite_fence = NULL; + eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { if (GRAPHICS_VER(i915) >= 11) @@ -2900,71 +3361,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, } ww_acquire_done(&eb.ww.ctx); + eb_capture_stage(&eb); - batch = eb.batch->vma; - - /* Allocate a request for this batch buffer nice and early. */ - eb.request = i915_request_create(eb.context); - if (IS_ERR(eb.request)) { - err = PTR_ERR(eb.request); - goto err_vma; - } - - if (unlikely(eb.gem_context->syncobj)) { - struct dma_fence *fence; - - fence = drm_syncobj_fence_get(eb.gem_context->syncobj); - err = i915_request_await_dma_fence(eb.request, fence); - dma_fence_put(fence); - if (err) - goto err_ext; - } - - if (in_fence) { - if (args->flags & I915_EXEC_FENCE_SUBMIT) - err = i915_request_await_execution(eb.request, - in_fence); - else - err = i915_request_await_dma_fence(eb.request, - in_fence); - if (err < 0) - goto err_request; - } - - if (eb.fences) { - err = await_fence_array(&eb); - if (err) + out_fence = eb_requests_create(&eb, in_fence, out_fence_fd); + if (IS_ERR(out_fence)) { + err = PTR_ERR(out_fence); + out_fence = NULL; + if (eb.requests[0]) goto err_request; + else + goto err_vma; } - if (out_fence_fd != -1) { - out_fence = sync_file_create(&eb.request->fence); - if (!out_fence) { - err = -ENOMEM; - goto err_request; - } - } - - /* - * Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when this - * request is retired will the the batch_obj be moved onto the - * inactive_list and lose its active reference. Hence we do not need - * to explicitly hold another reference here.
- */ - eb.request->batch = batch; - if (eb.batch_pool) - intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request); - - trace_i915_request_queue(eb.request, eb.batch_flags); - err = eb_submit(&eb, batch); + err = eb_submit(&eb); err_request: - i915_request_get(eb.request); - err = eb_request_add(&eb, err); + eb_requests_get(&eb); + err = eb_requests_add(&eb, err); if (eb.fences) - signal_fence_array(&eb); + signal_fence_array(&eb, eb.composite_fence ? + eb.composite_fence : + &eb.requests[0]->fence); if (out_fence) { if (err == 0) { @@ -2979,10 +3397,15 @@ err_request: if (unlikely(eb.gem_context->syncobj)) { drm_syncobj_replace_fence(eb.gem_context->syncobj, - &eb.request->fence); + eb.composite_fence ? + eb.composite_fence : + &eb.requests[0]->fence); } - i915_request_put(eb.request); + if (!out_fence && eb.composite_fence) + dma_fence_put(eb.composite_fence); + + eb_requests_put(&eb); err_vma: eb_release_vmas(&eb, true); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index e5ae9c06510c..c5150a1ee3d2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -134,6 +134,8 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, internal_free_pages(pages); obj->mm.dirty = false; + + __start_cpu_write(obj); } static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { @@ -143,24 +145,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { .put_pages = i915_gem_object_put_pages_internal, }; -/** - * i915_gem_object_create_internal: create an object with volatile pages - * @i915: the i915 device - * @size: the size in bytes of backing storage to allocate for the object - * - * Creates a new object that wraps some internal memory for private use. - * This object is not backed by swappable storage, and as such its contents - * are volatile and only valid whilst pinned. If the object is reaped by the - * shrinker, its pages and data will be discarded. Equally, it is not a full - * GEM object and so not valid for access from userspace. This makes it useful - * for hardware interfaces like ringbuffers (which are pinned from the time - * the request is written to the time the hardware stops accessing it), but - * not for contexts (which need to be preserved when not active for later - * reuse). Note that it is not cleared upon allocation. - */ struct drm_i915_gem_object * -i915_gem_object_create_internal(struct drm_i915_private *i915, - phys_addr_t size) +__i915_gem_object_create_internal(struct drm_i915_private *i915, + const struct drm_i915_gem_object_ops *ops, + phys_addr_t size) { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; @@ -177,7 +165,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0); + i915_gem_object_init(obj, ops, &lock_class, 0); obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE; /* @@ -197,3 +185,25 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return obj; } + +/** + * i915_gem_object_create_internal: create an object with volatile pages + * @i915: the i915 device + * @size: the size in bytes of backing storage to allocate for the object + * + * Creates a new object that wraps some internal memory for private use. 
+ * This object is not backed by swappable storage, and as such its contents + * are volatile and only valid whilst pinned. If the object is reaped by the + * shrinker, its pages and data will be discarded. Equally, it is not a full + * GEM object and so not valid for access from userspace. This makes it useful + * for hardware interfaces like ringbuffers (which are pinned from the time + * the request is written to the time the hardware stops accessing it), but + * not for contexts (which need to be preserved when not active for later + * reuse). Note that it is not cleared upon allocation. + */ +struct drm_i915_gem_object * +i915_gem_object_create_internal(struct drm_i915_private *i915, + phys_addr_t size) +{ + return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c index eb345305dc52..444f8268b9c5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -56,8 +56,8 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) * @obj: The object to check. * * This function is intended to be called from within the fence signaling - * path where the fence keeps the object from being migrated. For example - * during gpu reset or similar. + * path where the fence, or a pin, keeps the object from being migrated. For + * example during gpu reset or similar. * * Return: Whether the object is resident in lmem. */ @@ -66,7 +66,8 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) struct intel_memory_region *mr = READ_ONCE(obj->mm.region); #ifdef CONFIG_LOCKDEP - GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true)); + GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) && + i915_gem_object_evictable(obj)); #endif return mr && (mr->type == INTEL_MEMORY_LOCAL || mr->type == INTEL_MEMORY_STOLEN_LOCAL); @@ -104,6 +105,32 @@ __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915, } struct drm_i915_gem_object * +i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915, + const void *data, size_t size) +{ + struct drm_i915_gem_object *obj; + void *map; + + obj = i915_gem_object_create_lmem(i915, + round_up(size, PAGE_SIZE), + I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) + return obj; + + map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); + if (IS_ERR(map)) { + i915_gem_object_put(obj); + return map; + } + + memcpy(map, data, size); + + i915_gem_object_unpin_map(obj); + + return obj; +} + +struct drm_i915_gem_object * i915_gem_object_create_lmem(struct drm_i915_private *i915, resource_size_t size, unsigned int flags) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h index 4ee81fc66302..1b88ea13435c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -24,6 +24,10 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj); bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj); struct drm_i915_gem_object * +i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915, + const void *data, size_t size); + +struct drm_i915_gem_object * __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915, resource_size_t size, resource_size_t page_size, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 5130e8ed9564..39bb15eafc07 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -17,6 +17,7 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" #include "i915_gem_mman.h" +#include "i915_mm.h" #include "i915_trace.h" #include "i915_user_extensions.h" #include "i915_gem_ttm.h" @@ -395,7 +396,7 @@ retry: /* Track the mmo associated with the fenced vma */ vma->mmo = mmo; - if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)) + if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) intel_wakeref_auto(&i915->ggtt.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 6fb9afb65034..5fac9b560b73 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -25,11 +25,13 @@ #include <linux/sched/mm.h> #include "display/intel_frontbuffer.h" +#include "pxp/intel_pxp.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" +#include "i915_gem_ttm.h" #include "i915_memcpy.h" #include "i915_trace.h" @@ -90,25 +92,71 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, } /** - * Mark up the object's coherency levels for a given cache_level + * __i915_gem_object_fini - Clean up a GEM object initialization + * @obj: The gem object to cleanup + * + * This function cleans up gem object fields that are set up by + * drm_gem_private_object_init() and i915_gem_object_init(). + * It's primarily intended as a helper for backends that need to + * clean up the gem object in separate steps. + */ +void __i915_gem_object_fini(struct drm_i915_gem_object *obj) +{ + mutex_destroy(&obj->mm.get_page.lock); + mutex_destroy(&obj->mm.get_dma_page.lock); + dma_resv_fini(&obj->base._resv); +} + +/** + * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels + * for a given cache_level * @obj: #drm_i915_gem_object * @cache_level: cache level */ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + obj->cache_level = cache_level; if (cache_level != I915_CACHE_NONE) obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | I915_BO_CACHE_COHERENT_FOR_WRITE); - else if (HAS_LLC(to_i915(obj->base.dev))) + else if (HAS_LLC(i915)) obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; else obj->cache_coherent = 0; obj->cache_dirty = - !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); + !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) && + !IS_DGFX(i915); +} + +bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + /* + * This is purely from a security perspective, so we simply don't care + * about non-userspace objects being able to bypass the LLC. + */ + if (!(obj->flags & I915_BO_ALLOC_USER)) + return false; + + /* + * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it + * possible for userspace to bypass the GTT caching bits set by the + * kernel, as per the given object cache_level. This is troublesome + * since the heavy flush we apply when first gathering the pages is + * skipped if the kernel thinks the object is coherent with the GPU. As + * a result it might be possible to bypass the cache and read the + * contents of the page directly, which could be stale data. 
If it's + * just a case of userspace shooting themselves in the foot then so be + * it, but since i915 takes the stance of always zeroing memory before + * handing it to userspace, we need to prevent this. + */ + return IS_JSL_EHL(i915); } static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) @@ -174,7 +222,6 @@ void __i915_gem_free_object_rcu(struct rcu_head *head) container_of(head, typeof(*obj), rcu); struct drm_i915_private *i915 = to_i915(obj->base.dev); - dma_resv_fini(&obj->base._resv); i915_gem_object_free(obj); GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); @@ -204,10 +251,17 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj) } } -void __i915_gem_free_object(struct drm_i915_gem_object *obj) +/** + * __i915_gem_object_pages_fini - Clean up pages use of a gem object + * @obj: The gem object to clean up + * + * This function cleans up usage of the object mm.pages member. It + * is intended for backends that need to clean up a gem object in + * separate steps and needs to be called when the object is idle before + * the object's backing memory is freed. + */ +void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj) { - trace_i915_gem_object_destroy(obj); - if (!list_empty(&obj->vma.list)) { struct i915_vma *vma; @@ -233,11 +287,17 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj) __i915_gem_object_free_mmaps(obj); - GEM_BUG_ON(!list_empty(&obj->lut_list)); - atomic_set(&obj->mm.pages_pin_count, 0); __i915_gem_object_put_pages(obj); GEM_BUG_ON(i915_gem_object_has_pages(obj)); +} + +void __i915_gem_free_object(struct drm_i915_gem_object *obj) +{ + trace_i915_gem_object_destroy(obj); + + GEM_BUG_ON(!list_empty(&obj->lut_list)); + bitmap_free(obj->bit_17); if (obj->base.import_attach) @@ -253,6 +313,8 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj) if (obj->shares_resv_from) i915_vm_resv_put(obj->shares_resv_from); + + __i915_gem_object_fini(obj); } static void __i915_gem_free_objects(struct drm_i915_private *i915, @@ -266,6 +328,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, obj->ops->delayed_free(obj); continue; } + __i915_gem_object_pages_fini(obj); __i915_gem_free_object(obj); /* But keep the pointer alive for RCU-protected lookups */ @@ -306,15 +369,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) atomic_inc(&i915->mm.free_count); /* - * This serializes freeing with the shrinker. Since the free - * is delayed, first by RCU then by the workqueue, we want the - * shrinker to be able to free pages of unreferenced objects, - * or else we may oom whilst there are plenty of deferred - * freed objects. - */ - i915_gem_object_make_unshrinkable(obj); - - /* * Since we require blocking on struct_mutex to unbind the freed * object from the GPU before releasing resources back to the * system, we can not do that directly from the RCU callback (which may @@ -398,7 +452,7 @@ i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset * from can't cross a page boundary. The caller must ensure that @obj pages * are pinned and that @obj is synced wrt. any related writes. * - * Returns 0 on success or -ENODEV if the type of @obj's backing store is + * Return: %0 on success or -ENODEV if the type of @obj's backing store is * unsupported. 
*/ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) @@ -674,6 +728,57 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = { .export = i915_gem_prime_export, }; +/** + * i915_gem_object_get_moving_fence - Get the object's moving fence if any + * @obj: The object whose moving fence to get. + * + * A non-signaled moving fence means that there is an async operation + * pending on the object that needs to be waited on before setting up + * any GPU- or CPU PTEs to the object's pages. + * + * Return: A refcounted pointer to the object's moving fence if any, + * NULL otherwise. + */ +struct dma_fence * +i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj) +{ + return dma_fence_get(i915_gem_to_ttm(obj)->moving); +} + +/** + * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any + * @obj: The object whose moving fence to wait for. + * @intr: Whether to wait interruptible. + * + * If the moving fence signaled without an error, it is detached from the + * object and put. + * + * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted, + * negative error code if the async operation represented by the + * moving fence failed. + */ +int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, + bool intr) +{ + struct dma_fence *fence = i915_gem_to_ttm(obj)->moving; + int ret; + + assert_object_held(obj); + if (!fence) + return 0; + + ret = dma_fence_wait(fence, intr); + if (ret) + return ret; + + if (fence->error) + return fence->error; + + i915_gem_to_ttm(obj)->moving = NULL; + dma_fence_put(fence); + return 0; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/huge_gem_object.c" #include "selftests/huge_pages.c" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 48112b9d76df..66f20b803b01 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -58,6 +58,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops, struct lock_class_key *key, unsigned alloc_flags); + +void __i915_gem_object_fini(struct drm_i915_gem_object *obj); + struct drm_i915_gem_object * i915_gem_object_create_shmem(struct drm_i915_private *i915, resource_size_t size); @@ -90,7 +93,6 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915); struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj); -void i915_gem_object_truncate(struct drm_i915_gem_object *obj); /** * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle @@ -270,6 +272,12 @@ i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_is_protected(const struct drm_i915_gem_object *obj) +{ + return obj->flags & I915_BO_PROTECTED; +} + +static inline bool i915_gem_object_type_has(const struct drm_i915_gem_object *obj, unsigned long flags) { @@ -287,6 +295,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj) +{ + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST); +} + +static inline bool i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) { return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY); @@ -440,7 +454,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) } int 
__i915_gem_object_put_pages(struct drm_i915_gem_object *obj); -void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +int i915_gem_object_truncate(struct drm_i915_gem_object *obj); void i915_gem_object_writeback(struct drm_i915_gem_object *obj); /** @@ -503,27 +517,18 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } -static inline struct intel_engine_cs * -i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) -{ - struct intel_engine_cs *engine = NULL; - struct dma_fence *fence; - - rcu_read_lock(); - fence = dma_resv_get_excl_unlocked(obj->base.resv); - rcu_read_unlock(); - - if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) - engine = to_request(fence)->engine; - dma_fence_put(fence); +struct dma_fence * +i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj); - return engine; -} +int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, + bool intr); void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); +bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj); +bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); @@ -540,25 +545,15 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); -static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) -{ - if (obj->cache_dirty) - return false; - - if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) - return true; - - /* Currently in use by HW (display engine)? Keep flushed. 
*/ - return i915_gem_object_is_framebuffer(obj); -} - static inline void __start_cpu_write(struct drm_i915_gem_object *obj) { obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; - if (cpu_write_needs_clflush(obj)) + if (i915_gem_cpu_write_needs_clflush(obj)) obj->cache_dirty = true; } @@ -599,6 +594,8 @@ bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj); void __i915_gem_free_object_rcu(struct rcu_head *head); +void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj); + void __i915_gem_free_object(struct drm_i915_gem_object *obj); bool i915_gem_object_evictable(struct drm_i915_gem_object *obj); @@ -618,6 +615,14 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj, enum intel_memory_type type); +int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, + size_t size, struct intel_memory_region *mr, + struct address_space *mapping, + unsigned int max_segment); +void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, + bool dirty, bool backup); +void __shmem_writeback(size_t size, struct address_space *mapping); + #ifdef CONFIG_MMU_NOTIFIER static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 2471f36aaff3..f9f7e44099fe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -34,9 +34,11 @@ struct i915_lut_handle { struct drm_i915_gem_object_ops { unsigned int flags; -#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) -#define I915_GEM_OBJECT_IS_PROXY BIT(2) -#define I915_GEM_OBJECT_NO_MMAP BIT(3) +#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) +/* Skip the shrinker management in set_pages/unset_pages */ +#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST BIT(2) +#define I915_GEM_OBJECT_IS_PROXY BIT(3) +#define I915_GEM_OBJECT_NO_MMAP BIT(4) /* Interface between the GEM object and its backing storage. 
* get_pages() is called once prior to the use of the associated set @@ -54,8 +56,11 @@ struct drm_i915_gem_object_ops { int (*get_pages)(struct drm_i915_gem_object *obj); void (*put_pages)(struct drm_i915_gem_object *obj, struct sg_table *pages); - void (*truncate)(struct drm_i915_gem_object *obj); + int (*truncate)(struct drm_i915_gem_object *obj); void (*writeback)(struct drm_i915_gem_object *obj); + int (*shrinker_release_pages)(struct drm_i915_gem_object *obj, + bool no_gpu_wait, + bool should_writeback); int (*pread)(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pread *arg); @@ -288,17 +293,23 @@ struct drm_i915_gem_object { I915_SELFTEST_DECLARE(struct list_head st_link); unsigned long flags; -#define I915_BO_ALLOC_CONTIGUOUS BIT(0) -#define I915_BO_ALLOC_VOLATILE BIT(1) -#define I915_BO_ALLOC_CPU_CLEAR BIT(2) -#define I915_BO_ALLOC_USER BIT(3) +#define I915_BO_ALLOC_CONTIGUOUS BIT(0) +#define I915_BO_ALLOC_VOLATILE BIT(1) +#define I915_BO_ALLOC_CPU_CLEAR BIT(2) +#define I915_BO_ALLOC_USER BIT(3) +/* Object is allowed to lose its contents on suspend / resume, even if pinned */ +#define I915_BO_ALLOC_PM_VOLATILE BIT(4) +/* Object needs to be restored early using memcpy during resume */ +#define I915_BO_ALLOC_PM_EARLY BIT(5) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \ I915_BO_ALLOC_VOLATILE | \ I915_BO_ALLOC_CPU_CLEAR | \ - I915_BO_ALLOC_USER) -#define I915_BO_READONLY BIT(4) -#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */ - + I915_BO_ALLOC_USER | \ + I915_BO_ALLOC_PM_VOLATILE | \ + I915_BO_ALLOC_PM_EARLY) +#define I915_BO_READONLY BIT(6) +#define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! */ +#define I915_BO_PROTECTED BIT(8) /** * @mem_flags - Mutable placement-related flags * @@ -421,6 +432,33 @@ struct drm_i915_gem_object { * can freely bypass the CPU cache when touching the pages with the GPU, * where the kernel is completely unaware. On such platforms we need to * apply the sledgehammer-on-acquire regardless of the @cache_coherent. + * + * Special care is taken on non-LLC platforms, to prevent potential + * information leak. The driver currently ensures: + * + * 1. All userspace objects, by default, have @cache_level set as + * I915_CACHE_NONE. The only exception is userptr objects, where we + * instead force I915_CACHE_LLC, but we also don't allow userspace to + * ever change the @cache_level for such objects. Another special case + * is dma-buf, which doesn't rely on @cache_dirty, but there we + * always do a forced flush when acquiring the pages, if there is a + * chance that the pages can be read directly from main memory with + * the GPU. + * + * 2. All I915_CACHE_NONE objects have @cache_dirty initially true. + * + * 3. All swapped-out objects (i.e. shmem) have @cache_dirty set to + * true. + * + * 4. The @cache_dirty is never freely reset before the initial + * flush, even if userspace adjusts the @cache_level through the + * i915_gem_set_caching_ioctl. + * + * 5. All @cache_dirty objects (including swapped-in) are initially + * flushed with a synchronous call to drm_clflush_sg in + * __i915_gem_object_set_pages. The @cache_dirty can be freely reset + * at this point. All further asynchronous clflushes are never security + * critical, i.e. userspace is free to race against itself. */ unsigned int cache_dirty:1; @@ -453,9 +491,37 @@ struct drm_i915_gem_object { * instead go through the pin/unpin interfaces.
*/ atomic_t pages_pin_count; + + /** + * @shrink_pin: Prevents the pages from being made visible to + * the shrinker while the shrink_pin is non-zero. Most users + * should pretty much never have to care about this, outside of + * some special use cases. + * + * By default most objects will start out as visible to the + * shrinker (if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the + * backing pages are attached to the object, like in + * __i915_gem_object_set_pages(). They will then be removed from the + * shrinker list once the pages are released. + * + * The @shrink_pin is incremented by calling + * i915_gem_object_make_unshrinkable(), which will also remove + * the object from the shrinker list, if the pin count was zero. + * + * Callers will then typically call + * i915_gem_object_make_shrinkable() or + * i915_gem_object_make_purgeable() to decrement the pin count, + * and make the pages visible again. + */ atomic_t shrink_pin; /** + * @ttm_shrinkable: True when the object is using shmem pages + * underneath. Protected by the object lock. + */ + bool ttm_shrinkable; + + /** * Priority list of potential placements for this object. */ struct intel_memory_region **placements; @@ -479,6 +545,7 @@ struct drm_i915_gem_object { */ struct list_head region_link; + struct i915_refct_sgt *rsgt; struct sg_table *pages; void *mapping; @@ -514,7 +581,7 @@ struct drm_i915_gem_object { struct i915_gem_object_page_iter get_dma_page; /** - * Element within i915->mm.unbound_list or i915->mm.bound_list, + * Element within i915->mm.shrink_list or i915->mm.purge_list, * locked by i915->mm.obj_lock. */ struct list_head link; @@ -532,11 +599,19 @@ struct drm_i915_gem_object { } mm; struct { - struct sg_table *cached_io_st; + struct i915_refct_sgt *cached_io_rsgt; struct i915_gem_object_page_iter get_io_page; + struct drm_i915_gem_object *backup; bool created:1; } ttm; + /* + * Record which PXP key instance this object was created against (if + * any), so we can use it to determine if the encryption is valid by + * comparing against the current key instance. + */ + u32 pxp_key_instance; + /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 8eb1c3a6fc9c..49c6e55c68ce 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -26,6 +26,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, /* Make the pages coherent with the GPU (flushing any swapin).
*/ if (obj->cache_dirty) { + WARN_ON_ONCE(IS_DGFX(i915)); obj->write_domain = 0; if (i915_gem_object_has_struct_page(obj)) drm_clflush_sg(pages); @@ -68,7 +69,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, shrinkable = false; } - if (shrinkable) { + if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) { struct list_head *list; unsigned long flags; @@ -158,11 +159,13 @@ retry: } /* Immediately discard the backing storage */ -void i915_gem_object_truncate(struct drm_i915_gem_object *obj) +int i915_gem_object_truncate(struct drm_i915_gem_object *obj) { drm_gem_free_mmap_offset(&obj->base); if (obj->ops->truncate) - obj->ops->truncate(obj); + return obj->ops->truncate(obj); + + return 0; } /* Try to discard unwanted pages */ @@ -208,7 +211,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) if (i915_gem_object_is_volatile(obj)) obj->mm.madv = I915_MADV_WILLNEED; - i915_gem_object_make_unshrinkable(obj); + if (!i915_gem_object_has_self_managed_shrink_list(obj)) + i915_gem_object_make_unshrinkable(obj); if (obj->mm.mapping) { unmap_object(obj, page_mask_bits(obj->mm.mapping)); @@ -414,6 +418,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, } if (!ptr) { + err = i915_gem_object_wait_moving_fence(obj, true); + if (err) { + ptr = ERR_PTR(err); + goto err_unpin; + } + if (GEM_WARN_ON(type == I915_MAP_WC && !static_cpu_has(X86_FEATURE_PAT))) ptr = ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 8b9d7d14c4bd..726b40e1fbb0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -5,6 +5,7 @@ */ #include "gem/i915_gem_pm.h" +#include "gem/i915_gem_ttm_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" @@ -39,6 +40,88 @@ void i915_gem_suspend(struct drm_i915_private *i915) i915_gem_drain_freed_objects(i915); } +static int lmem_restore(struct drm_i915_private *i915, u32 flags) +{ + struct intel_memory_region *mr; + int ret = 0, id; + + for_each_memory_region(mr, i915, id) { + if (mr->type == INTEL_MEMORY_LOCAL) { + ret = i915_ttm_restore_region(mr, flags); + if (ret) + break; + } + } + + return ret; +} + +static int lmem_suspend(struct drm_i915_private *i915, u32 flags) +{ + struct intel_memory_region *mr; + int ret = 0, id; + + for_each_memory_region(mr, i915, id) { + if (mr->type == INTEL_MEMORY_LOCAL) { + ret = i915_ttm_backup_region(mr, flags); + if (ret) + break; + } + } + + return ret; +} + +static void lmem_recover(struct drm_i915_private *i915) +{ + struct intel_memory_region *mr; + int id; + + for_each_memory_region(mr, i915, id) + if (mr->type == INTEL_MEMORY_LOCAL) + i915_ttm_recover_region(mr); +} + +int i915_gem_backup_suspend(struct drm_i915_private *i915) +{ + int ret; + + /* Opportunistically try to evict unpinned objects */ + ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU); + if (ret) + goto out_recover; + + i915_gem_suspend(i915); + + /* + * More objects may have become unpinned as requests were + * retired. Now try to evict again. The gt may be wedged here + * in which case we automatically fall back to memcpy. + * We allow also backing up pinned objects that have not been + * marked for early recover, and that may contain, for example, + * page-tables for the migrate context. 
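+ *
+ * As a sketch, the full sequence this function implements is:
+ *
+ *	lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);	(unpinned only)
+ *	i915_gem_suspend(i915);
+ *	lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
+ *			   I915_TTM_BACKUP_PINNED);	(also pinned, GPU copy)
+ *	lmem_suspend(i915, I915_TTM_BACKUP_PINNED);	(remaining, memcpy)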
+ */ + ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU | + I915_TTM_BACKUP_PINNED); + if (ret) + goto out_recover; + + /* + * Remaining objects are backed up using memcpy once we've stopped + * using the migrate context. + */ + ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED); + if (ret) + goto out_recover; + + return 0; + +out_recover: + lmem_recover(i915); + + return ret; +} + void i915_gem_suspend_late(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; @@ -128,12 +211,20 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) void i915_gem_resume(struct drm_i915_private *i915) { + int ret; + GEM_TRACE("%s\n", dev_name(i915->drm.dev)); + ret = lmem_restore(i915, 0); + GEM_WARN_ON(ret); + /* * As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. So let's just reset * it and start again. */ intel_gt_resume(&i915->gt); + + ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU); + GEM_WARN_ON(ret); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h index c9a66630e92e..bedf1e95941a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h @@ -18,6 +18,7 @@ void i915_gem_idle_work_handler(struct work_struct *work); void i915_gem_suspend(struct drm_i915_private *i915); void i915_gem_suspend_late(struct drm_i915_private *i915); +int i915_gem_backup_suspend(struct drm_i915_private *i915); int i915_gem_freeze(struct drm_i915_private *i915); int i915_gem_freeze_late(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index 1f557b2178ed..a4350227e9ae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -11,7 +11,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, struct intel_memory_region *mem) { - obj->mm.region = intel_memory_region_get(mem); + obj->mm.region = mem; mutex_lock(&mem->objects.lock); list_add(&obj->mm.region_link, &mem->objects.list); @@ -25,8 +25,6 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj) mutex_lock(&mem->objects.lock); list_del(&obj->mm.region_link); mutex_unlock(&mem->objects.lock); - - intel_memory_region_put(mem); } struct drm_i915_gem_object * @@ -80,3 +78,73 @@ err_object_free: i915_gem_object_free(obj); return ERR_PTR(err); } + +/** + * i915_gem_process_region - Iterate over all objects of a region using ops + * to process and optionally skip objects + * @mr: The memory region + * @apply: ops and private data + * + * This function can be used to iterate over the region's object list, + * checking whether to skip objects, and, if not, lock the objects and + * process them using the supplied ops. Note that this function temporarily + * removes objects from the region list while iterating, so that if run + * concurrently with itself it may not iterate over all objects. + * + * Return: 0 if successful, negative error code on failure. + */ +int i915_gem_process_region(struct intel_memory_region *mr, + struct i915_gem_apply_to_region *apply) +{ + const struct i915_gem_apply_to_region_ops *ops = apply->ops; + struct drm_i915_gem_object *obj; + struct list_head still_in_list; + int ret = 0; + + /* + * In the future, a non-NULL apply->ww could mean the caller is + * already in a locking transaction and provides its own context.
+ */ + GEM_WARN_ON(apply->ww); + + INIT_LIST_HEAD(&still_in_list); + mutex_lock(&mr->objects.lock); + for (;;) { + struct i915_gem_ww_ctx ww; + + obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj), + mm.region_link); + if (!obj) + break; + + list_move_tail(&obj->mm.region_link, &still_in_list); + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + + /* + * Note: Someone else might be migrating the object at this + * point. The object's region is not stable until we lock + * the object. + */ + mutex_unlock(&mr->objects.lock); + apply->ww = &ww; + for_i915_gem_ww(&ww, ret, apply->interruptible) { + ret = i915_gem_object_lock(obj, apply->ww); + if (ret) + continue; + + if (obj->mm.region == mr) + ret = ops->process_obj(apply, obj); + /* Implicit object unlock */ + } + + i915_gem_object_put(obj); + mutex_lock(&mr->objects.lock); + if (ret) + break; + } + list_splice_tail(&still_in_list, &mr->objects.list); + mutex_unlock(&mr->objects.lock); + + return ret; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h index 1008e580a89a..fcaa12d657d4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h @@ -12,6 +12,41 @@ struct intel_memory_region; struct drm_i915_gem_object; struct sg_table; +struct i915_gem_apply_to_region; + +/** + * struct i915_gem_apply_to_region_ops - ops to use when iterating over all + * region objects. + */ +struct i915_gem_apply_to_region_ops { + /** + * process_obj - Process the current object + * @apply: Embed this for private data. + * @obj: The current object. + * + * Note that if this function is part of a ww transaction, and + * if returns -EDEADLK for one of the objects, it may be + * rerun for that same object in the same pass. + */ + int (*process_obj)(struct i915_gem_apply_to_region *apply, + struct drm_i915_gem_object *obj); +}; + +/** + * struct i915_gem_apply_to_region - Argument to the struct + * i915_gem_apply_to_region_ops functions. + * @ops: The ops for the operation. + * @ww: Locking context used for the transaction. + * @interruptible: Whether to perform object locking interruptible. 
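+ *
+ * A minimal usage sketch; the fictional_* names below are
+ * hypothetical and only illustrate the intended wiring:
+ *
+ *	struct fictional_apply {
+ *		struct i915_gem_apply_to_region base;
+ *		u32 flags;
+ *	};
+ *
+ *	static int fictional_process_obj(struct i915_gem_apply_to_region *apply,
+ *					 struct drm_i915_gem_object *obj)
+ *	{
+ *		struct fictional_apply *fa =
+ *			container_of(apply, typeof(*fa), base);
+ *
+ *		return fictional_do_something(obj, fa->flags);
+ *	}
+ *
+ *	static const struct i915_gem_apply_to_region_ops fictional_ops = {
+ *		.process_obj = fictional_process_obj,
+ *	};
+ *
+ *	struct fictional_apply fa = {
+ *		.base = { .ops = &fictional_ops, .interruptible = 1 },
+ *		.flags = 0,
+ *	};
+ *
+ *	err = i915_gem_process_region(mr, &fa.base);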
+ * + * This structure is intended to be embedded in a private struct if needed + */ +struct i915_gem_apply_to_region { + const struct i915_gem_apply_to_region_ops *ops; + struct i915_gem_ww_ctx *ww; + u32 interruptible:1; +}; + void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, struct intel_memory_region *mem); void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj); @@ -22,4 +57,6 @@ i915_gem_object_create_region(struct intel_memory_region *mem, resource_size_t page_size, unsigned int flags); +int i915_gem_process_region(struct intel_memory_region *mr, + struct i915_gem_apply_to_region *apply); #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 11f072193f3b..cc9fe258fba7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -25,47 +25,54 @@ static void check_release_pagevec(struct pagevec *pvec) cond_resched(); } -static int shmem_get_pages(struct drm_i915_gem_object *obj) +void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, + bool dirty, bool backup) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct intel_memory_region *mem = obj->mm.region; - const unsigned long page_count = obj->base.size / PAGE_SIZE; + struct sgt_iter sgt_iter; + struct pagevec pvec; + struct page *page; + + mapping_clear_unevictable(mapping); + + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, st) { + if (dirty) + set_page_dirty(page); + + if (backup) + mark_page_accessed(page); + + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); + + sg_free_table(st); +} + +int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, + size_t size, struct intel_memory_region *mr, + struct address_space *mapping, + unsigned int max_segment) +{ + const unsigned long page_count = size / PAGE_SIZE; unsigned long i; - struct address_space *mapping; - struct sg_table *st; struct scatterlist *sg; - struct sgt_iter sgt_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ - unsigned int max_segment = i915_sg_segment_size(); - unsigned int sg_page_sizes; gfp_t noreclaim; int ret; /* - * Assert that the object is not currently in any GPU domain. As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache - */ - GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); - GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); - - /* * If there's no chance of allocating enough pages for the whole * object, bail early. */ - if (obj->base.size > resource_size(&mem->region)) + if (size > resource_size(&mr->region)) return -ENOMEM; - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - -rebuild_st: - if (sg_alloc_table(st, page_count, GFP_KERNEL)) { - kfree(st); + if (sg_alloc_table(st, page_count, GFP_KERNEL)) return -ENOMEM; - } /* * Get the list of pages out of our struct file. 
They'll be pinned @@ -73,14 +80,12 @@ rebuild_st: * * Fail silently without starting the shrinker */ - mapping = obj->base.filp->f_mapping; mapping_set_unevictable(mapping); noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); noreclaim |= __GFP_NORETRY | __GFP_NOWARN; sg = st->sgl; st->nents = 0; - sg_page_sizes = 0; for (i = 0; i < page_count; i++) { const unsigned int shrink[] = { I915_SHRINK_BOUND | I915_SHRINK_UNBOUND, @@ -135,10 +140,9 @@ rebuild_st: if (!i || sg->length >= max_segment || page_to_pfn(page) != last_pfn + 1) { - if (i) { - sg_page_sizes |= sg->length; + if (i) sg = sg_next(sg); - } + st->nents++; sg_set_page(sg, page, PAGE_SIZE, 0); } else { @@ -149,14 +153,67 @@ rebuild_st: /* Check that the i965g/gm workaround works. */ GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL); } - if (sg) { /* loop terminated early; short sg table */ - sg_page_sizes |= sg->length; + if (sg) /* loop terminated early; short sg table */ sg_mark_end(sg); - } /* Trim unused sg entries to avoid wasting memory. */ i915_sg_trim(st); + return 0; +err_sg: + sg_mark_end(sg); + if (sg != st->sgl) { + shmem_sg_free_table(st, mapping, false, false); + } else { + mapping_clear_unevictable(mapping); + sg_free_table(st); + } + + /* + * shmemfs first checks if there is enough memory to allocate the page + * and reports ENOSPC should there be insufficient, along with the usual + * ENOMEM for a genuine allocation failure. + * + * We use ENOSPC in our driver to mean that we have run out of aperture + * space and so want to translate the error from shmemfs back to our + * usual understanding of ENOMEM. + */ + if (ret == -ENOSPC) + ret = -ENOMEM; + + return ret; +} + +static int shmem_get_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct intel_memory_region *mem = obj->mm.region; + struct address_space *mapping = obj->base.filp->f_mapping; + const unsigned long page_count = obj->base.size / PAGE_SIZE; + unsigned int max_segment = i915_sg_segment_size(); + struct sg_table *st; + struct sgt_iter sgt_iter; + struct page *page; + int ret; + + /* + * Assert that the object is not currently in any GPU domain. As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); + GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + +rebuild_st: + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping, + max_segment); + if (ret) + goto err_st; + ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { /* @@ -168,6 +225,7 @@ rebuild_st: for_each_sgt_page(page, sgt_iter, st) put_page(page); sg_free_table(st); + kfree(st); max_segment = PAGE_SIZE; goto rebuild_st; @@ -182,46 +240,15 @@ rebuild_st: if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); - /* - * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it - * possible for userspace to bypass the GTT caching bits set by the - * kernel, as per the given object cache_level. This is troublesome - * since the heavy flush we apply when first gathering the pages is - * skipped if the kernel thinks the object is coherent with the GPU. As - * a result it might be possible to bypass the cache and read the - * contents of the page directly, which could be stale data. 
If it's - * just a case of userspace shooting themselves in the foot then so be - * it, but since i915 takes the stance of always zeroing memory before - * handing it to userspace, we need to prevent this. - * - * By setting cache_dirty here we make the clflush in set_pages - * unconditional on such platforms. - */ - if (IS_JSL_EHL(i915) && obj->flags & I915_BO_ALLOC_USER) + if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; - __i915_gem_object_set_pages(obj, st, sg_page_sizes); + __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); return 0; -err_sg: - sg_mark_end(sg); err_pages: - mapping_clear_unevictable(mapping); - if (sg != st->sgl) { - struct pagevec pvec; - - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, st) { - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - } - sg_free_table(st); - kfree(st); - + shmem_sg_free_table(st, mapping, false, false); /* * shmemfs first checks if there is enough memory to allocate the page * and reports ENOSPC should there be insufficient, along with the usual @@ -231,13 +258,16 @@ err_pages: * space and so want to translate the error from shmemfs back to our * usual understanding of ENOMEM. */ +err_st: if (ret == -ENOSPC) ret = -ENOMEM; + kfree(st); + return ret; } -static void +static int shmem_truncate(struct drm_i915_gem_object *obj) { /* @@ -249,12 +279,12 @@ shmem_truncate(struct drm_i915_gem_object *obj) shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); + + return 0; } -static void -shmem_writeback(struct drm_i915_gem_object *obj) +void __shmem_writeback(size_t size, struct address_space *mapping) { - struct address_space *mapping; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, @@ -270,10 +300,9 @@ shmem_writeback(struct drm_i915_gem_object *obj) * instead of invoking writeback so they are aged and paged out * as normal. */ - mapping = obj->base.filp->f_mapping; /* Begin writeback on each dirty page */ - for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) { + for (i = 0; i < size >> PAGE_SHIFT; i++) { struct page *page; page = find_lock_page(mapping, i); @@ -296,11 +325,19 @@ put: } } +static void +shmem_writeback(struct drm_i915_gem_object *obj) +{ + __shmem_writeback(obj->base.size, obj->base.filp->f_mapping); +} + void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, bool needs_clflush) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); if (obj->mm.madv == I915_MADV_DONTNEED) @@ -312,15 +349,20 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, drm_clflush_sg(pages); __start_cpu_write(obj); + /* + * On non-LLC platforms, force the flush-on-acquire if this is ever + * swapped-in. Our async flush path is not trust worthy enough yet(and + * happens in the wrong order), and with some tricks it's conceivable + * for userspace to change the cache-level to I915_CACHE_NONE after the + * pages are swapped-in, and since execbuf binds the object before doing + * the async flush, we have a race window. 
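+ * Setting cache_dirty here makes the clflush in set_pages
+ * unconditional for such objects, closing that window.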
+ */ + if (!HAS_LLC(i915)) + obj->cache_dirty = true; } void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages) { - struct sgt_iter sgt_iter; - struct pagevec pvec; - struct page *page; - - GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev))); __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); @@ -328,25 +370,10 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_ if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj, pages); - mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); - - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) - set_page_dirty(page); - - if (obj->mm.madv == I915_MADV_WILLNEED) - mark_page_accessed(page); - - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - obj->mm.dirty = false; - - sg_free_table(pages); + shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping, + obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED); kfree(pages); + obj->mm.dirty = false; } static void @@ -637,9 +664,10 @@ static int init_shmem(struct intel_memory_region *mem) return 0; /* Don't error, we can simply fallback to the kernel mnt */ } -static void release_shmem(struct intel_memory_region *mem) +static int release_shmem(struct intel_memory_region *mem) { i915_gemfs_fini(mem->i915); + return 0; } static const struct intel_memory_region_ops shmem_region_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index e382b7f2353b..157a9765f483 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -15,7 +15,6 @@ #include "gt/intel_gt_requests.h" -#include "dma_resv_utils.h" #include "i915_trace.h" static bool swap_available(void) @@ -56,19 +55,25 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, return false; } -static void try_to_writeback(struct drm_i915_gem_object *obj, - unsigned int flags) +static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags) { + if (obj->ops->shrinker_release_pages) + return obj->ops->shrinker_release_pages(obj, + !(flags & I915_SHRINK_ACTIVE), + flags & I915_SHRINK_WRITEBACK); + switch (obj->mm.madv) { case I915_MADV_DONTNEED: i915_gem_object_truncate(obj); - return; + return 0; case __I915_MADV_PURGED: - return; + return 0; } if (flags & I915_SHRINK_WRITEBACK) i915_gem_object_writeback(obj); + + return 0; } /** @@ -118,7 +123,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, intel_wakeref_t wakeref = 0; unsigned long count = 0; unsigned long scanned = 0; - int err; + int err = 0; /* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */ bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915); @@ -222,15 +227,13 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, } if (!__i915_gem_object_put_pages(obj)) { - try_to_writeback(obj, shrink); - count += obj->base.size >> PAGE_SHIFT; + if (!try_to_writeback(obj, shrink)) + count += obj->base.size >> PAGE_SHIFT; } if (!ww) i915_gem_object_unlock(obj); } - dma_resv_prune(obj->base.resv); - scanned += obj->base.size >> PAGE_SHIFT; skip: i915_gem_object_put(obj); @@ -242,12 +245,15 @@ skip: list_splice_tail(&still_in_list, phase->list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); if (err) - return err; + break; } if (shrink & I915_SHRINK_BOUND) intel_runtime_pm_put(&i915->runtime_pm, 
wakeref); + if (err) + return err; + if (nr_scanned) *nr_scanned += scanned; return count; @@ -455,6 +461,16 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, #define obj_to_i915(obj__) to_i915((obj__)->base.dev) +/** + * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. By + * default all object types that support shrinking(see IS_SHRINKABLE), will also + * make the object visible to the shrinker after allocating the system memory + * pages. + * @obj: The GEM object. + * + * This is typically used for special kernel internal objects that can't be + * easily processed by the shrinker, like if they are perma-pinned. + */ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = obj_to_i915(obj); @@ -479,13 +495,12 @@ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } -static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, - struct list_head *head) +static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, + struct list_head *head) { struct drm_i915_private *i915 = obj_to_i915(obj); unsigned long flags; - GEM_BUG_ON(!i915_gem_object_has_pages(obj)); if (!i915_gem_object_is_shrinkable(obj)) return; @@ -505,14 +520,67 @@ static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } +/** + * __i915_gem_object_make_shrinkable - Move the object to the tail of the + * shrinkable list. Objects on this list might be swapped out. Used with + * WILLNEED objects. + * @obj: The GEM object. + * + * DO NOT USE. This is intended to be called on very special objects that don't + * yet have mm.pages, but are guaranteed to have potentially reclaimable pages + * underneath. + */ +void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) +{ + ___i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.shrink_list); +} + +/** + * __i915_gem_object_make_purgeable - Move the object to the tail of the + * purgeable list. Objects on this list might be swapped out. Used with + * DONTNEED objects. + * @obj: The GEM object. + * + * DO NOT USE. This is intended to be called on very special objects that don't + * yet have mm.pages, but are guaranteed to have potentially reclaimable pages + * underneath. + */ +void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) +{ + ___i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.purge_list); +} + +/** + * i915_gem_object_make_shrinkable - Move the object to the tail of the + * shrinkable list. Objects on this list might be swapped out. Used with + * WILLNEED objects. + * @obj: The GEM object. + * + * MUST only be called on objects which have backing pages. + * + * MUST be balanced with previous call to i915_gem_object_make_unshrinkable(). + */ void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) { - __i915_gem_object_make_shrinkable(obj, - &obj_to_i915(obj)->mm.shrink_list); + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + __i915_gem_object_make_shrinkable(obj); } +/** + * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable + * list. Used with DONTNEED objects. Unlike with shrinkable objects, the + * shrinker will attempt to discard the backing pages, instead of trying to swap + * them out. + * @obj: The GEM object. + * + * MUST only be called on objects which have backing pages. 
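+ * (Asserted with a GEM_BUG_ON in the function body.)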
+ * + * MUST be balanced with previous call to i915_gem_object_make_unshrinkable(). + */ void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) { - __i915_gem_object_make_shrinkable(obj, - &obj_to_i915(obj)->mm.purge_list); + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + __i915_gem_object_make_purgeable(obj); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index ddd37ccb1362..bce03d74a0b4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -399,7 +399,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) return 0; } - if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) { + if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "DMAR active"); @@ -720,9 +720,10 @@ static int init_stolen_smem(struct intel_memory_region *mem) return i915_gem_init_stolen(mem); } -static void release_stolen_smem(struct intel_memory_region *mem) +static int release_stolen_smem(struct intel_memory_region *mem) { i915_gem_cleanup_stolen(mem->i915); + return 0; } static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { @@ -759,10 +760,11 @@ err_fini: return err; } -static void release_stolen_lmem(struct intel_memory_region *mem) +static int release_stolen_lmem(struct intel_memory_region *mem) { io_mapping_fini(&mem->iomap); i915_gem_cleanup_stolen(mem->i915); + return 0; } static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 8beef57ba52b..218a9b3037c7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -10,18 +10,12 @@ #include "intel_memory_region.h" #include "intel_region_ttm.h" +#include "gem/i915_gem_mman.h" #include "gem/i915_gem_object.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" -#include "gem/i915_gem_mman.h" - -#include "gt/intel_migrate.h" -#include "gt/intel_engine_pm.h" - -#define I915_PL_LMEM0 TTM_PL_PRIV -#define I915_PL_SYSTEM TTM_PL_SYSTEM -#define I915_PL_STOLEN TTM_PL_VRAM -#define I915_PL_GGTT TTM_PL_TT +#include "gem/i915_gem_ttm_move.h" +#include "gem/i915_gem_ttm_pm.h" #define I915_TTM_PRIO_PURGE 0 #define I915_TTM_PRIO_NO_PAGES 1 @@ -36,7 +30,9 @@ * struct i915_ttm_tt - TTM page vector with additional private information * @ttm: The base TTM page vector. * @dev: The struct device used for dma mapping and unmapping. - * @cached_st: The cached scatter-gather table. + * @cached_rsgt: The cached scatter-gather table. + * @is_shmem: Set if using shmem. + * @filp: The shmem file, if using shmem backend. * * Note that DMA may be going on right up to the point where the page- * vector is unpopulated in delayed destroy. Hence keep the @@ -47,7 +43,10 @@ struct i915_ttm_tt { struct ttm_tt ttm; struct device *dev; - struct sg_table *cached_st; + struct i915_refct_sgt cached_rsgt; + + bool is_shmem; + struct file *filp; }; static const struct ttm_place sys_placement_flags = { @@ -64,6 +63,20 @@ static struct ttm_placement i915_sys_placement = { .busy_placement = &sys_placement_flags, }; +/** + * i915_ttm_sys_placement - Return the struct ttm_placement to be + * used for an object in system memory. + * + * Rather than making the struct extern, use this + * function. + * + * Return: A pointer to a static variable for sys placement. 
+ */ +struct ttm_placement *i915_ttm_sys_placement(void) +{ + return &i915_sys_placement; +} + static int i915_ttm_err_to_gem(int err) { /* Fastpath */ @@ -91,37 +104,15 @@ static int i915_ttm_err_to_gem(int err) return err; } -static bool gpu_binds_iomem(struct ttm_resource *mem) -{ - return mem->mem_type != TTM_PL_SYSTEM; -} - -static bool cpu_maps_iomem(struct ttm_resource *mem) -{ - /* Once / if we support GGTT, this is also false for cached ttm_tts */ - return mem->mem_type != TTM_PL_SYSTEM; -} - -static enum i915_cache_level -i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, - struct ttm_tt *ttm) -{ - return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) && - ttm->caching == ttm_cached) ? I915_CACHE_LLC : - I915_CACHE_NONE; -} - -static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj); - static enum ttm_caching i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj) { /* - * Objects only allowed in system get cached cpu-mappings. - * Other objects get WC mapping for now. Even if in system. + * Objects only allowed in system get cached cpu-mappings, or when + * evicting lmem-only buffers to system for swapping. Other objects get + * WC mapping for now. Even if in system. */ - if (obj->mm.region->type == INTEL_MEMORY_SYSTEM && - obj->mm.n_placements <= 1) + if (obj->mm.n_placements <= 1) return ttm_cached; return ttm_write_combined; @@ -167,15 +158,103 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, placement->busy_placement = busy; } +static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) +{ + struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); + struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + const unsigned int max_segment = i915_sg_segment_size(); + const size_t size = ttm->num_pages << PAGE_SHIFT; + struct file *filp = i915_tt->filp; + struct sgt_iter sgt_iter; + struct sg_table *st; + struct page *page; + unsigned long i; + int err; + + if (!filp) { + struct address_space *mapping; + gfp_t mask; + + filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE); + if (IS_ERR(filp)) + return PTR_ERR(filp); + + mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; + + mapping = filp->f_mapping; + mapping_set_gfp_mask(mapping, mask); + GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); + + i915_tt->filp = filp; + } + + st = &i915_tt->cached_rsgt.table; + err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping, + max_segment); + if (err) + return err; + + err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + if (err) + goto err_free_st; + + i = 0; + for_each_sgt_page(page, sgt_iter, st) + ttm->pages[i++] = page; + + if (ttm->page_flags & TTM_TT_FLAG_SWAPPED) + ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; + + return 0; + +err_free_st: + shmem_sg_free_table(st, filp->f_mapping, false, false); + + return err; +} + +static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED; + struct sg_table *st = &i915_tt->cached_rsgt.table; + + shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping, + backup, backup); +} + +static void i915_ttm_tt_release(struct kref *ref) +{ + struct i915_ttm_tt *i915_tt = + container_of(ref, typeof(*i915_tt), cached_rsgt.kref); + struct 
sg_table *st = &i915_tt->cached_rsgt.table; + + GEM_WARN_ON(st->sgl); + + kfree(i915_tt); +} + +static const struct i915_refct_sgt_ops tt_rsgt_ops = { + .release = i915_ttm_tt_release +}; + static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->resource->mem_type); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + enum ttm_caching caching; struct i915_ttm_tt *i915_tt; int ret; + if (!obj) + return NULL; + i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL); if (!i915_tt) return NULL; @@ -184,38 +263,66 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, man->use_tt) page_flags |= TTM_TT_FLAG_ZERO_ALLOC; - ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, - i915_ttm_select_tt_caching(obj)); - if (ret) { - kfree(i915_tt); - return NULL; + caching = i915_ttm_select_tt_caching(obj); + if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) { + page_flags |= TTM_TT_FLAG_EXTERNAL | + TTM_TT_FLAG_EXTERNAL_MAPPABLE; + i915_tt->is_shmem = true; } + ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching); + if (ret) + goto err_free; + + __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size, + &tt_rsgt_ops); + i915_tt->dev = obj->base.dev->dev; return &i915_tt->ttm; + +err_free: + kfree(i915_tt); + return NULL; +} + +static int i915_ttm_tt_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + + if (i915_tt->is_shmem) + return i915_ttm_tt_shmem_populate(bdev, ttm, ctx); + + return ttm_pool_alloc(&bdev->pool, ttm, ctx); } static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + struct sg_table *st = &i915_tt->cached_rsgt.table; - if (i915_tt->cached_st) { - dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st, - DMA_BIDIRECTIONAL, 0); - sg_free_table(i915_tt->cached_st); - kfree(i915_tt->cached_st); - i915_tt->cached_st = NULL; + if (st->sgl) + dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); + + if (i915_tt->is_shmem) { + i915_ttm_tt_shmem_unpopulate(ttm); + } else { + sg_free_table(st); + ttm_pool_free(&bdev->pool, ttm); } - ttm_pool_free(&bdev->pool, ttm); } static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + if (i915_tt->filp) + fput(i915_tt->filp); + ttm_tt_fini(ttm); - kfree(i915_tt); + i915_refct_sgt_put(&i915_tt->cached_rsgt); } static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, @@ -223,6 +330,17 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + if (!obj) + return false; + + /* + * EXTERNAL objects should never be swapped out by TTM, instead we need + * to handle that ourselves. TTM will already skip such objects for us, + * but we would like to avoid grabbing locks for no good reason. + */ + if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) + return false; + /* Will do for now. 
Our pinned objects are still on TTM's LRU lists */ return i915_gem_object_evictable(obj); } @@ -233,28 +351,19 @@ static void i915_ttm_evict_flags(struct ttm_buffer_object *bo, *placement = i915_sys_placement; } -static int i915_ttm_move_notify(struct ttm_buffer_object *bo) -{ - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - int ret; - - ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); - if (ret) - return ret; - - ret = __i915_gem_object_put_pages(obj); - if (ret) - return ret; - - return 0; -} - -static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj) +/** + * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information + * @obj: The GEM object + * This function frees any LMEM-related information that is cached on + * the object. For example the radix tree for fast page lookup and the + * cached refcounted sg-table + */ +void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj) { struct radix_tree_iter iter; void __rcu **slot; - if (!obj->ttm.cached_io_st) + if (!obj->ttm.cached_io_rsgt) return; rcu_read_lock(); @@ -262,93 +371,106 @@ static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj) radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index); rcu_read_unlock(); - sg_free_table(obj->ttm.cached_io_st); - kfree(obj->ttm.cached_io_st); - obj->ttm.cached_io_st = NULL; + i915_refct_sgt_put(obj->ttm.cached_io_rsgt); + obj->ttm.cached_io_rsgt = NULL; } -static void -i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj) +/** + * i915_ttm_purge - Clear an object of its memory + * @obj: The object + * + * This function is called to clear an object of it's memory when it is + * marked as not needed anymore. + * + * Return: 0 on success, negative error code on failure. + */ +int i915_ttm_purge(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = false, + }; + struct ttm_placement place = {}; + int ret; - if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { - obj->write_domain = I915_GEM_DOMAIN_WC; - obj->read_domains = I915_GEM_DOMAIN_WC; - } else { - obj->write_domain = I915_GEM_DOMAIN_CPU; - obj->read_domains = I915_GEM_DOMAIN_CPU; - } -} + if (obj->mm.madv == __I915_MADV_PURGED) + return 0; -static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj) -{ - struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - unsigned int cache_level; - unsigned int i; + ret = ttm_bo_validate(bo, &place, &ctx); + if (ret) + return ret; - /* - * If object was moved to an allowable region, update the object - * region to consider it migrated. Note that if it's currently not - * in an allowable region, it's evicted and we don't update the - * object region. - */ - if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) { - for (i = 0; i < obj->mm.n_placements; ++i) { - struct intel_memory_region *mr = obj->mm.placements[i]; - - if (intel_region_to_ttm_type(mr) == bo->resource->mem_type && - mr != obj->mm.region) { - i915_gem_object_release_memory_region(obj); - i915_gem_object_init_memory_region(obj, mr); - break; - } - } + if (bo->ttm && i915_tt->filp) { + /* + * The below fput(which eventually calls shmem_truncate) might + * be delayed by worker, so when directly called to purge the + * pages(like by the shrinker) we should try to be more + * aggressive and release the pages immediately. 
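+ * shmem_truncate_range() drops the backing pages right away,
+ * while the fput() below merely releases our file reference.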
+ */ + shmem_truncate_range(file_inode(i915_tt->filp), + 0, (loff_t)-1); + fput(fetch_and_zero(&i915_tt->filp)); } - obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM); - - obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM : - I915_BO_FLAG_STRUCT_PAGE; + obj->write_domain = 0; + obj->read_domains = 0; + i915_ttm_adjust_gem_after_move(obj); + i915_ttm_free_cached_io_rsgt(obj); + obj->mm.madv = __I915_MADV_PURGED; - cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource, - bo->ttm); - i915_gem_object_set_cache_coherency(obj, cache_level); + return 0; } -static void i915_ttm_purge(struct drm_i915_gem_object *obj) +static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj, + bool no_wait_gpu, + bool should_writeback) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); struct ttm_operation_ctx ctx = { .interruptible = true, - .no_wait_gpu = false, + .no_wait_gpu = no_wait_gpu, }; struct ttm_placement place = {}; int ret; - if (obj->mm.madv == __I915_MADV_PURGED) - return; + if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM) + return 0; + + GEM_BUG_ON(!i915_tt->is_shmem); - /* TTM's purge interface. Note that we might be reentering. */ + if (!i915_tt->filp) + return 0; + + ret = ttm_bo_wait_ctx(bo, &ctx); + if (ret) + return ret; + + switch (obj->mm.madv) { + case I915_MADV_DONTNEED: + return i915_ttm_purge(obj); + case __I915_MADV_PURGED: + return 0; + } + + if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) + return 0; + + bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED; ret = ttm_bo_validate(bo, &place, &ctx); - if (!ret) { - obj->write_domain = 0; - obj->read_domains = 0; - i915_ttm_adjust_gem_after_move(obj); - i915_ttm_free_cached_io_st(obj); - obj->mm.madv = __I915_MADV_PURGED; + if (ret) { + bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; + return ret; } -} -static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) -{ - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - int ret = i915_ttm_move_notify(bo); + if (should_writeback) + __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping); - GEM_WARN_ON(ret); - GEM_WARN_ON(obj->ttm.cached_io_st); - if (!ret && obj->mm.madv != I915_MADV_WILLNEED) - i915_ttm_purge(obj); + return 0; } static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) @@ -356,219 +478,102 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); if (likely(obj)) { - /* This releases all gem object bindings to the backend. */ - i915_ttm_free_cached_io_st(obj); - __i915_gem_free_object(obj); + __i915_gem_object_pages_fini(obj); + i915_ttm_free_cached_io_rsgt(obj); } } -static struct intel_memory_region * -i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) -{ - struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); - - /* There's some room for optimization here... 
*/ - GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && - ttm_mem_type < I915_PL_LMEM0); - if (ttm_mem_type == I915_PL_SYSTEM) - return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, - 0); - - return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, - ttm_mem_type - I915_PL_LMEM0); -} - -static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm) +static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); struct sg_table *st; int ret; - if (i915_tt->cached_st) - return i915_tt->cached_st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return ERR_PTR(-ENOMEM); + if (i915_tt->cached_rsgt.table.sgl) + return i915_refct_sgt_get(&i915_tt->cached_rsgt); + st = &i915_tt->cached_rsgt.table; ret = sg_alloc_table_from_pages_segment(st, ttm->pages, ttm->num_pages, 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, i915_sg_segment_size(), GFP_KERNEL); if (ret) { - kfree(st); + st->sgl = NULL; return ERR_PTR(ret); } ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); if (ret) { sg_free_table(st); - kfree(st); return ERR_PTR(ret); } - i915_tt->cached_st = st; - return st; + return i915_refct_sgt_get(&i915_tt->cached_rsgt); } -static struct sg_table * +/** + * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the + * resource memory + * @obj: The GEM object used for sg-table caching + * @res: The struct ttm_resource for which an sg-table is requested. + * + * This function returns a refcounted sg-table representing the memory + * pointed to by @res. If @res is the object's current resource it may also + * cache the sg_table on the object or attempt to access an already cached + * sg-table. The refcounted sg-table needs to be put when no-longer in use. + * + * Return: A valid pointer to a struct i915_refct_sgt or error pointer on + * failure. + */ +struct i915_refct_sgt * i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, struct ttm_resource *res) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - if (!gpu_binds_iomem(res)) + if (!i915_ttm_gtt_binds_lmem(res)) return i915_ttm_tt_get_st(bo->ttm); /* * If CPU mapping differs, we need to add the ttm_tt pages to * the resulting st. Might make sense for GGTT. 
*/ - GEM_WARN_ON(!cpu_maps_iomem(res)); - return intel_region_ttm_resource_to_st(obj->mm.region, res); -} - -static int i915_ttm_accel_move(struct ttm_buffer_object *bo, - struct ttm_resource *dst_mem, - struct sg_table *dst_st) -{ - struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), - bdev); - struct ttm_resource_manager *src_man = - ttm_manager_type(bo->bdev, bo->resource->mem_type); - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct sg_table *src_st; - struct i915_request *rq; - struct ttm_tt *ttm = bo->ttm; - enum i915_cache_level src_level, dst_level; - int ret; - - if (!i915->gt.migrate.context) - return -EINVAL; - - dst_level = i915_ttm_cache_level(i915, dst_mem, ttm); - if (!ttm || !ttm_tt_is_populated(ttm)) { - if (bo->type == ttm_bo_type_kernel) - return -EINVAL; + GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res)); + if (bo->resource == res) { + if (!obj->ttm.cached_io_rsgt) { + struct i915_refct_sgt *rsgt; - if (ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)) - return 0; + rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, + res); + if (IS_ERR(rsgt)) + return rsgt; - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL, - dst_st->sgl, dst_level, - gpu_binds_iomem(dst_mem), - 0, &rq); - - if (!ret && rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); - } - intel_engine_pm_put(i915->gt.migrate.context->engine); - } else { - src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) : - obj->ttm.cached_io_st; - - src_level = i915_ttm_cache_level(i915, bo->resource, ttm); - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_copy(i915->gt.migrate.context, - NULL, src_st->sgl, src_level, - gpu_binds_iomem(bo->resource), - dst_st->sgl, dst_level, - gpu_binds_iomem(dst_mem), - &rq); - if (!ret && rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); + obj->ttm.cached_io_rsgt = rsgt; } - intel_engine_pm_put(i915->gt.migrate.context->engine); + return i915_refct_sgt_get(obj->ttm.cached_io_rsgt); } - return ret; + return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); } -static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *dst_mem, - struct ttm_place *hop) +static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct ttm_resource_manager *dst_man = - ttm_manager_type(bo->bdev, dst_mem->mem_type); - struct intel_memory_region *dst_reg, *src_reg; - union { - struct ttm_kmap_iter_tt tt; - struct ttm_kmap_iter_iomap io; - } _dst_iter, _src_iter; - struct ttm_kmap_iter *dst_iter, *src_iter; - struct sg_table *dst_st; int ret; - dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type); - src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type); - GEM_BUG_ON(!dst_reg || !src_reg); - - /* Sync for now. We could do the actual copy async. */ - ret = ttm_bo_wait_ctx(bo, ctx); - if (ret) - return ret; + if (!obj) + return; ret = i915_ttm_move_notify(bo); - if (ret) - return ret; - - if (obj->mm.madv != I915_MADV_WILLNEED) { + GEM_WARN_ON(ret); + GEM_WARN_ON(obj->ttm.cached_io_rsgt); + if (!ret && obj->mm.madv != I915_MADV_WILLNEED) i915_ttm_purge(obj); - ttm_resource_free(bo, &dst_mem); - return 0; - } - - /* Populate ttm with pages if needed. Typically system memory. 
*/ - if (bo->ttm && (dst_man->use_tt || - (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { - ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); - if (ret) - return ret; - } - - dst_st = i915_ttm_resource_get_st(obj, dst_mem); - if (IS_ERR(dst_st)) - return PTR_ERR(dst_st); - - ret = i915_ttm_accel_move(bo, dst_mem, dst_st); - if (ret) { - /* If we start mapping GGTT, we can no longer use man::use_tt here. */ - dst_iter = !cpu_maps_iomem(dst_mem) ? - ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) : - ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap, - dst_st, dst_reg->region.start); - - src_iter = !cpu_maps_iomem(bo->resource) ? - ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) : - ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap, - obj->ttm.cached_io_st, - src_reg->region.start); - - ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter); - } - /* Below dst_mem becomes bo->resource. */ - ttm_bo_move_sync_cleanup(bo, dst_mem); - i915_ttm_adjust_domains_after_move(obj); - i915_ttm_free_cached_io_st(obj); - - if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) { - obj->ttm.cached_io_st = dst_st; - obj->ttm.get_io_page.sg_pos = dst_st->sgl; - obj->ttm.get_io_page.sg_idx = 0; - } - - i915_ttm_adjust_gem_after_move(obj); - return 0; } static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { - if (!cpu_maps_iomem(mem)) + if (!i915_ttm_cpu_maps_iomem(mem)) return 0; mem->bus.caching = ttm_write_combined; @@ -581,19 +586,26 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start; struct scatterlist *sg; + unsigned long base; unsigned int ofs; + GEM_BUG_ON(!obj); GEM_WARN_ON(bo->ttm); + base = obj->mm.region->iomap.base - obj->mm.region->region.start; sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true); return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs; } +/* + * All callbacks need to take care not to downcast a struct ttm_buffer_object + * without checking its subclass, since it might be a TTM ghost object. + */ static struct ttm_device_funcs i915_ttm_bo_driver = { .ttm_tt_create = i915_ttm_tt_create, + .ttm_tt_populate = i915_ttm_tt_populate, .ttm_tt_unpopulate = i915_ttm_tt_unpopulate, .ttm_tt_destroy = i915_ttm_tt_destroy, .eviction_valuable = i915_ttm_eviction_valuable, @@ -623,7 +635,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, .interruptible = true, .no_wait_gpu = false, }; - struct sg_table *st; int real_num_busy; int ret; @@ -650,7 +661,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, return i915_ttm_err_to_gem(ret); } - i915_ttm_adjust_lru(obj); if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) { ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); if (ret) @@ -661,14 +671,19 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, } if (!i915_gem_object_has_pages(obj)) { - /* Object either has a page vector or is an iomem object */ - st = bo->ttm ? 
i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st; - if (IS_ERR(st)) - return PTR_ERR(st); + struct i915_refct_sgt *rsgt = + i915_ttm_resource_get_st(obj, bo->resource); - __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); + if (IS_ERR(rsgt)) + return PTR_ERR(rsgt); + + GEM_BUG_ON(obj->mm.rsgt); + obj->mm.rsgt = rsgt; + __i915_gem_object_set_pages(obj, &rsgt->table, + i915_sg_dma_sizes(rsgt->table.sgl)); } + i915_ttm_adjust_lru(obj); return ret; } @@ -740,12 +755,21 @@ static void i915_ttm_put_pages(struct drm_i915_gem_object *obj, * and shrinkers will move it out if needed. */ - i915_ttm_adjust_lru(obj); + if (obj->mm.rsgt) + i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt)); } -static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) +/** + * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists. + * @obj: The object + */ +void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); + bool shrinkable = + bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm); /* * Don't manipulate the TTM LRUs while in TTM bo destruction. @@ -755,10 +779,53 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) return; /* + * We skip managing the shrinker LRU in set_pages() and just manage + * everything here. This does at least solve the issue with having + * temporary shmem mappings(like with evicted lmem) not being visible to + * the shrinker. Only our shmem objects are shrinkable, everything else + * we keep as unshrinkable. + * + * To make sure everything plays nice we keep an extra shrink pin in TTM + * if the underlying pages are not currently shrinkable. Once we release + * our pin, like when the pages are moved to shmem, the pages will then + * be added to the shrinker LRU, assuming the caller isn't also holding + * a pin. + * + * TODO: consider maybe also bumping the shrinker list here when we have + * already unpinned it, which should give us something more like an LRU. + * + * TODO: There is a small window of opportunity for this function to + * get called from eviction after we've dropped the last GEM refcount, + * but before the TTM deleted flag is set on the object. Avoid + * adjusting the shrinker list in such cases, since the object is + * not available to the shrinker anyway due to its zero refcount. + * To fix this properly we should move to a TTM shrinker LRU list for + * these objects. + */ + if (kref_get_unless_zero(&obj->base.refcount)) { + if (shrinkable != obj->mm.ttm_shrinkable) { + if (shrinkable) { + if (obj->mm.madv == I915_MADV_WILLNEED) + __i915_gem_object_make_shrinkable(obj); + else + __i915_gem_object_make_purgeable(obj); + } else { + i915_gem_object_make_unshrinkable(obj); + } + + obj->mm.ttm_shrinkable = shrinkable; + } + i915_gem_object_put(obj); + } + + /* * Put on the correct LRU list depending on the MADV status */ spin_lock(&bo->bdev->lru_lock); - if (obj->mm.madv != I915_MADV_WILLNEED) { + if (shrinkable) { + /* Try to keep shmem_tt from being considered for shrinking. 
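+ * TTM scans its LRU lists in ascending priority order when
+ * swapping, so the highest priority is considered last.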
*/ + bo->priority = TTM_MAX_BO_PRIORITY - 1; + } else if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { if (bo->priority < I915_TTM_PRIO_HAS_PAGES) @@ -789,26 +856,47 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) */ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) { - if (obj->ttm.created) { - ttm_bo_put(i915_gem_to_ttm(obj)); - } else { - __i915_gem_free_object(obj); - call_rcu(&obj->rcu, __i915_gem_free_object_rcu); - } + GEM_BUG_ON(!obj->ttm.created); + + ttm_bo_put(i915_gem_to_ttm(obj)); } static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) { struct vm_area_struct *area = vmf->vma; - struct drm_i915_gem_object *obj = - i915_ttm_to_gem(area->vm_private_data); + struct ttm_buffer_object *bo = area->vm_private_data; + struct drm_device *dev = bo->base.dev; + struct drm_i915_gem_object *obj; + vm_fault_t ret; + int idx; + + obj = i915_ttm_to_gem(bo); + if (!obj) + return VM_FAULT_SIGBUS; /* Sanity check that we allow writing into this object */ if (unlikely(i915_gem_object_is_readonly(obj) && area->vm_flags & VM_WRITE)) return VM_FAULT_SIGBUS; - return ttm_bo_vm_fault(vmf); + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (drm_dev_enter(dev, &idx)) { + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT); + drm_dev_exit(idx); + } else { + ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); + } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + + i915_ttm_adjust_lru(obj); + + dma_resv_unlock(bo->base.resv); + return ret; } static int @@ -859,13 +947,18 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .name = "i915_gem_object_ttm", + .flags = I915_GEM_OBJECT_IS_SHRINKABLE | + I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST, .get_pages = i915_ttm_get_pages, .put_pages = i915_ttm_put_pages, .truncate = i915_ttm_purge, + .shrinker_release_pages = i915_ttm_shrinker_release_pages, + .adjust_lru = i915_ttm_adjust_lru, .delayed_free = i915_ttm_delayed_free, .migrate = i915_ttm_migrate, + .mmap_offset = i915_ttm_mmap_offset, .mmap_ops = &vm_ops_ttm, }; @@ -876,8 +969,29 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo) i915_gem_object_release_memory_region(obj); mutex_destroy(&obj->ttm.get_io_page.lock); - if (obj->ttm.created) + + if (obj->ttm.created) { + /* + * We freely manage the shrinker LRU outide of the mm.pages life + * cycle. As a result when destroying the object we should be + * extra paranoid and ensure we remove it from the LRU, before + * we free the object. + * + * Touching the ttm_shrinkable outside of the object lock here + * should be safe now that the last GEM object ref was dropped. + */ + if (obj->mm.ttm_shrinkable) + i915_gem_object_make_unshrinkable(obj); + + i915_ttm_backup_free(obj); + + /* This releases all gem object bindings to the backend. */ + __i915_gem_free_object(obj); + call_rcu(&obj->rcu, __i915_gem_free_object_rcu); + } else { + __i915_gem_object_fini(obj); + } } /** @@ -906,8 +1020,11 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, drm_gem_private_object_init(&i915->drm, &obj->base, size); i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags); - i915_gem_object_init_memory_region(obj, mem); - i915_gem_object_make_unshrinkable(obj); + + /* Don't put on a region list until we're either locked or fully initialized. 
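+ * The list insertion happens further down via
+ * i915_gem_object_init_memory_region(), once the TTM bo has
+ * been fully set up.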
*/ + obj->mm.region = mem; + INIT_LIST_HEAD(&obj->mm.region_link); + INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->ttm.get_io_page.lock); bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device : @@ -919,6 +1036,14 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, GEM_BUG_ON(page_size && obj->mm.n_placements); /* + * Keep an extra shrink pin to prevent the object from being made + * shrinkable too early. If the ttm_tt is ever allocated in shmem, we + * drop the pin. The TTM backend manages the shrinker LRU itself, + * outside of the normal mm.pages life cycle. + */ + i915_gem_object_make_unshrinkable(obj); + + /* * If this function fails, it will call the destructor, but * our caller still owns the object. So no freeing in the * destructor until obj->ttm.created is true. @@ -933,6 +1058,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, return i915_ttm_err_to_gem(ret); obj->ttm.created = true; + i915_gem_object_release_memory_region(obj); + i915_gem_object_init_memory_region(obj, mem); i915_ttm_adjust_domains_after_move(obj); i915_ttm_adjust_gem_after_move(obj); i915_gem_object_unlock(obj); @@ -942,6 +1069,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, static const struct intel_memory_region_ops ttm_system_region_ops = { .init_object = __i915_gem_ttm_object_init, + .release = intel_region_ttm_fini, }; struct intel_memory_region * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h index 40927f67b6d9..9d698ad00853 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h @@ -5,6 +5,8 @@ #ifndef _I915_GEM_TTM_H_ #define _I915_GEM_TTM_H_ +#include <drm/ttm/ttm_placement.h> + #include "gem/i915_gem_object_types.h" /** @@ -35,7 +37,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo); static inline struct drm_i915_gem_object * i915_ttm_to_gem(struct ttm_buffer_object *bo) { - if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy)) + if (bo->destroy != i915_ttm_bo_destroy) return NULL; return container_of(bo, struct drm_i915_gem_object, __do_not_access); @@ -46,4 +48,47 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, resource_size_t size, resource_size_t page_size, unsigned int flags); + +/* Internal I915 TTM declarations and definitions below. */ + +#define I915_PL_LMEM0 TTM_PL_PRIV +#define I915_PL_SYSTEM TTM_PL_SYSTEM +#define I915_PL_STOLEN TTM_PL_VRAM +#define I915_PL_GGTT TTM_PL_TT + +struct ttm_placement *i915_ttm_sys_placement(void); + +void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj); + +struct i915_refct_sgt * +i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, + struct ttm_resource *res); + +void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj); + +int i915_ttm_purge(struct drm_i915_gem_object *obj); + +/** + * i915_ttm_gtt_binds_lmem - Should the memory be viewed as LMEM by the GTT? + * @mem: struct ttm_resource representing the memory. + * + * Return: true if memory should be viewed as LMEM for GTT binding purposes, + * false otherwise. + */ +static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem) +{ + return mem->mem_type != I915_PL_SYSTEM; +} + +/** + * i915_ttm_cpu_maps_iomem - Should the memory be viewed as IOMEM by the CPU? + * @mem: struct ttm_resource representing the memory. + * + * Return: true if memory should be viewed as IOMEM for CPU mapping purposes. 
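+ * In practice that is everything except I915_PL_SYSTEM, e.g.
+ * LMEM and stolen memory.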
+ */ +static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem) +{ + /* Once / if we support GGTT, this is also false for cached ttm_tts */ + return mem->mem_type != I915_PL_SYSTEM; +} #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c new file mode 100644 index 000000000000..80df9f592407 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -0,0 +1,874 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <linux/dma-fence-array.h> + +#include <drm/ttm/ttm_bo_driver.h> + +#include "i915_drv.h" +#include "intel_memory_region.h" +#include "intel_region_ttm.h" + +#include "gem/i915_gem_object.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" + +#include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" +#include "gt/intel_migrate.h" + +/** + * DOC: Selftest failure modes for failsafe migration: + * + * For fail_gpu_migration, the gpu blit scheduled is always a clear blit + * rather than a copy blit, and then we force the failure paths as if + * the blit fence returned an error. + * + * For fail_work_allocation we fail the kmalloc of the async worker, we + * sync the gpu blit. If it then fails, or fail_gpu_migration is set to + * true, then a memcpy operation is performed sync. + */ +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +static bool fail_gpu_migration; +static bool fail_work_allocation; + +void i915_ttm_migrate_set_failure_modes(bool gpu_migration, + bool work_allocation) +{ + fail_gpu_migration = gpu_migration; + fail_work_allocation = work_allocation; +} +#endif + +/** + * DOC: Set of utilities to dynamically collect dependencies and + * eventually coalesce them into a single fence which is fed into + * the GT migration code, since it only accepts a single dependency + * fence. + * The single fence returned from these utilities, in the case of + * dependencies from multiple fence contexts, a struct dma_fence_array, + * since the i915 request code can break that up and await the individual + * fences. + * + * Once we can do async unbinding, this is also needed to coalesce + * the migration fence with the unbind fences. + * + * While collecting the individual dependencies, we store the refcounted + * struct dma_fence pointers in a realloc-managed pointer array, since + * that can be easily fed into a dma_fence_array. Other options are + * available, like for example an xarray for similarity with drm/sched. + * Can be changed easily if needed. + * + * A struct i915_deps need to be initialized using i915_deps_init(). + * If i915_deps_add_dependency() or i915_deps_add_resv() return an + * error code they will internally call i915_deps_fini(), which frees + * all internal references and allocations. After a call to + * i915_deps_to_fence(), or i915_deps_sync(), the struct should similarly + * be viewed as uninitialized. + * + * We might want to break this out into a separate file as a utility. + */ + +#define I915_DEPS_MIN_ALLOC_CHUNK 8U + +/** + * struct i915_deps - Collect dependencies into a single dma-fence + * @single: Storage for pointer if the collection is a single fence. + * @fence: Allocated array of fence pointers if more than a single fence; + * otherwise points to the address of @single. + * @num_deps: Current number of dependency fences. + * @fences_size: Size of the @fences array in number of pointers. + * @gfp: Allocation mode. 
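+ *
+ * A minimal usage sketch, assuming the reservation object is
+ * already locked (illustrative only):
+ *
+ *	struct i915_deps deps;
+ *	struct dma_fence *fence;
+ *	int err;
+ *
+ *	i915_deps_init(&deps, GFP_KERNEL);
+ *	err = i915_deps_add_resv(&deps, bo->base.resv, true, false, ctx);
+ *	if (err)
+ *		return err;
+ *
+ *	fence = i915_deps_to_fence(&deps, ctx);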
+ */ +struct i915_deps { + struct dma_fence *single; + struct dma_fence **fences; + unsigned int num_deps; + unsigned int fences_size; + gfp_t gfp; +}; + +static void i915_deps_reset_fences(struct i915_deps *deps) +{ + if (deps->fences != &deps->single) + kfree(deps->fences); + deps->num_deps = 0; + deps->fences_size = 1; + deps->fences = &deps->single; +} + +static void i915_deps_init(struct i915_deps *deps, gfp_t gfp) +{ + deps->fences = NULL; + deps->gfp = gfp; + i915_deps_reset_fences(deps); +} + +static void i915_deps_fini(struct i915_deps *deps) +{ + unsigned int i; + + for (i = 0; i < deps->num_deps; ++i) + dma_fence_put(deps->fences[i]); + + if (deps->fences != &deps->single) + kfree(deps->fences); +} + +static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence, + const struct ttm_operation_ctx *ctx) +{ + int ret; + + if (deps->num_deps >= deps->fences_size) { + unsigned int new_size = 2 * deps->fences_size; + struct dma_fence **new_fences; + + new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK); + new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp); + if (!new_fences) + goto sync; + + memcpy(new_fences, deps->fences, + deps->fences_size * sizeof(*new_fences)); + swap(new_fences, deps->fences); + if (new_fences != &deps->single) + kfree(new_fences); + deps->fences_size = new_size; + } + deps->fences[deps->num_deps++] = dma_fence_get(fence); + return 0; + +sync: + if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) { + ret = -EBUSY; + goto unref; + } + + ret = dma_fence_wait(fence, ctx->interruptible); + if (ret) + goto unref; + + ret = fence->error; + if (ret) + goto unref; + + return 0; + +unref: + i915_deps_fini(deps); + return ret; +} + +static int i915_deps_sync(struct i915_deps *deps, + const struct ttm_operation_ctx *ctx) +{ + struct dma_fence **fences = deps->fences; + unsigned int i; + int ret = 0; + + for (i = 0; i < deps->num_deps; ++i, ++fences) { + if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) { + ret = -EBUSY; + break; + } + + ret = dma_fence_wait(*fences, ctx->interruptible); + if (!ret) + ret = (*fences)->error; + if (ret) + break; + } + + i915_deps_fini(deps); + return ret; +} + +static int i915_deps_add_dependency(struct i915_deps *deps, + struct dma_fence *fence, + const struct ttm_operation_ctx *ctx) +{ + unsigned int i; + int ret; + + if (!fence) + return 0; + + if (dma_fence_is_signaled(fence)) { + ret = fence->error; + if (ret) + i915_deps_fini(deps); + return ret; + } + + for (i = 0; i < deps->num_deps; ++i) { + struct dma_fence *entry = deps->fences[i]; + + if (!entry->context || entry->context != fence->context) + continue; + + if (dma_fence_is_later(fence, entry)) { + dma_fence_put(entry); + deps->fences[i] = dma_fence_get(fence); + } + + return 0; + } + + return i915_deps_grow(deps, fence, ctx); +} + +static struct dma_fence *i915_deps_to_fence(struct i915_deps *deps, + const struct ttm_operation_ctx *ctx) +{ + struct dma_fence_array *array; + + if (deps->num_deps == 0) + return NULL; + + if (deps->num_deps == 1) { + deps->num_deps = 0; + return deps->fences[0]; + } + + /* + * TODO: Alter the allocation mode here to not try too hard to + * make things async. 
+ */ + array = dma_fence_array_create(deps->num_deps, deps->fences, 0, 0, + false); + if (!array) + return ERR_PTR(i915_deps_sync(deps, ctx)); + + deps->fences = NULL; + i915_deps_reset_fences(deps); + + return &array->base; +} + +static int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv, + bool all, const bool no_excl, + const struct ttm_operation_ctx *ctx) +{ + struct dma_resv_iter iter; + struct dma_fence *fence; + + dma_resv_assert_held(resv); + dma_resv_for_each_fence(&iter, resv, all, fence) { + int ret; + + if (no_excl && dma_resv_iter_is_exclusive(&iter)) + continue; + + ret = i915_deps_add_dependency(deps, fence, ctx); + if (ret) + return ret; + } + + return 0; +} + +static enum i915_cache_level +i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, + struct ttm_tt *ttm) +{ + return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && + !i915_ttm_gtt_binds_lmem(res) && + ttm->caching == ttm_cached) ? I915_CACHE_LLC : + I915_CACHE_NONE; +} + +static struct intel_memory_region * +i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) +{ + struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); + + /* There's some room for optimization here... */ + GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && + ttm_mem_type < I915_PL_LMEM0); + if (ttm_mem_type == I915_PL_SYSTEM) + return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, + 0); + + return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, + ttm_mem_type - I915_PL_LMEM0); +} + +/** + * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a + * TTM move + * @obj: The gem object + */ +void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + + if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { + obj->write_domain = I915_GEM_DOMAIN_WC; + obj->read_domains = I915_GEM_DOMAIN_WC; + } else { + obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; + } +} + +/** + * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move + * @obj: The gem object + * + * Adjusts the GEM object's region, mem_flags and cache coherency after a + * TTM move. + */ +void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + unsigned int cache_level; + unsigned int i; + + /* + * If object was moved to an allowable region, update the object + * region to consider it migrated. Note that if it's currently not + * in an allowable region, it's evicted and we don't update the + * object region. + */ + if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) { + for (i = 0; i < obj->mm.n_placements; ++i) { + struct intel_memory_region *mr = obj->mm.placements[i]; + + if (intel_region_to_ttm_type(mr) == bo->resource->mem_type && + mr != obj->mm.region) { + i915_gem_object_release_memory_region(obj); + i915_gem_object_init_memory_region(obj, mr); + break; + } + } + } + + obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM); + + obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM : + I915_BO_FLAG_STRUCT_PAGE; + + cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource, + bo->ttm); + i915_gem_object_set_cache_coherency(obj, cache_level); +} + +/** + * i915_ttm_move_notify - Prepare an object for move + * @bo: The ttm buffer object. 
+ * + * This function prepares an object for move by removing all GPU bindings, + * removing all CPU mappings and finally releasing the pages sg-table. + * + * Return: 0 if successful, negative error code on error. + */ +int i915_ttm_move_notify(struct ttm_buffer_object *bo) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + int ret; + + ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); + if (ret) + return ret; + + ret = __i915_gem_object_put_pages(obj); + if (ret) + return ret; + + return 0; +} + +static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, + bool clear, + struct ttm_resource *dst_mem, + struct ttm_tt *dst_ttm, + struct sg_table *dst_st, + struct dma_fence *dep) +{ + struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), + bdev); + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct i915_request *rq; + struct ttm_tt *src_ttm = bo->ttm; + enum i915_cache_level src_level, dst_level; + int ret; + + if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt)) + return ERR_PTR(-EINVAL); + + /* With fail_gpu_migration, we always perform a GPU clear. */ + if (I915_SELFTEST_ONLY(fail_gpu_migration)) + clear = true; + + dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm); + if (clear) { + if (bo->type == ttm_bo_type_kernel && + !I915_SELFTEST_ONLY(fail_gpu_migration)) + return ERR_PTR(-EINVAL); + + intel_engine_pm_get(i915->gt.migrate.context->engine); + ret = intel_context_migrate_clear(i915->gt.migrate.context, dep, + dst_st->sgl, dst_level, + i915_ttm_gtt_binds_lmem(dst_mem), + 0, &rq); + } else { + struct i915_refct_sgt *src_rsgt = + i915_ttm_resource_get_st(obj, bo->resource); + + if (IS_ERR(src_rsgt)) + return ERR_CAST(src_rsgt); + + src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm); + intel_engine_pm_get(i915->gt.migrate.context->engine); + ret = intel_context_migrate_copy(i915->gt.migrate.context, + dep, src_rsgt->table.sgl, + src_level, + i915_ttm_gtt_binds_lmem(bo->resource), + dst_st->sgl, dst_level, + i915_ttm_gtt_binds_lmem(dst_mem), + &rq); + + i915_refct_sgt_put(src_rsgt); + } + + intel_engine_pm_put(i915->gt.migrate.context->engine); + + if (ret && rq) { + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + } + + return ret ? ERR_PTR(ret) : &rq->fence; +} + +/** + * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality. + * @_dst_iter: Storage space for the destination kmap iterator. + * @_src_iter: Storage space for the source kmap iterator. + * @dst_iter: Pointer to the destination kmap iterator. + * @src_iter: Pointer to the source kmap iterator. + * @num_pages: Number of pages to copy or clear. + * @clear: Whether to clear instead of copy. + * @src_rsgt: Refcounted scatter-gather list of source memory. + * @dst_rsgt: Refcounted scatter-gather list of destination memory. + */ +struct i915_ttm_memcpy_arg { + union { + struct ttm_kmap_iter_tt tt; + struct ttm_kmap_iter_iomap io; + } _dst_iter, + _src_iter; + struct ttm_kmap_iter *dst_iter; + struct ttm_kmap_iter *src_iter; + unsigned long num_pages; + bool clear; + struct i915_refct_sgt *src_rsgt; + struct i915_refct_sgt *dst_rsgt; +}; + +/** + * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence. + * @fence: The dma-fence. + * @work: The work struct used for the memcpy work. + * @lock: The fence lock. Not used to protect anything else ATM. + * @irq_work: Low latency worker to signal the fence since it can't be done + * from the callback for lockdep reasons. + * @cb: Callback for the accelerated migration fence.
+ * @arg: The argument for the memcpy functionality. + */ +struct i915_ttm_memcpy_work { + struct dma_fence fence; + struct work_struct work; + /* The fence lock */ + spinlock_t lock; + struct irq_work irq_work; + struct dma_fence_cb cb; + struct i915_ttm_memcpy_arg arg; +}; + +static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg) +{ + ttm_move_memcpy(arg->clear, arg->num_pages, + arg->dst_iter, arg->src_iter); +} + +static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg, + struct ttm_buffer_object *bo, bool clear, + struct ttm_resource *dst_mem, + struct ttm_tt *dst_ttm, + struct i915_refct_sgt *dst_rsgt) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct intel_memory_region *dst_reg, *src_reg; + + dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type); + src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type); + GEM_BUG_ON(!dst_reg || !src_reg); + + arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ? + ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) : + ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap, + &dst_rsgt->table, dst_reg->region.start); + + arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ? + ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) : + ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap, + &obj->ttm.cached_io_rsgt->table, + src_reg->region.start); + arg->clear = clear; + arg->num_pages = bo->base.size >> PAGE_SHIFT; + + arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt); + arg->src_rsgt = clear ? NULL : + i915_ttm_resource_get_st(obj, bo->resource); +} + +static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg) +{ + i915_refct_sgt_put(arg->src_rsgt); + i915_refct_sgt_put(arg->dst_rsgt); +} + +static void __memcpy_work(struct work_struct *work) +{ + struct i915_ttm_memcpy_work *copy_work = + container_of(work, typeof(*copy_work), work); + struct i915_ttm_memcpy_arg *arg = &copy_work->arg; + bool cookie = dma_fence_begin_signalling(); + + i915_ttm_move_memcpy(arg); + dma_fence_end_signalling(cookie); + + dma_fence_signal(&copy_work->fence); + + i915_ttm_memcpy_release(arg); + dma_fence_put(&copy_work->fence); +} + +static void __memcpy_irq_work(struct irq_work *irq_work) +{ + struct i915_ttm_memcpy_work *copy_work = + container_of(irq_work, typeof(*copy_work), irq_work); + struct i915_ttm_memcpy_arg *arg = &copy_work->arg; + + dma_fence_signal(&copy_work->fence); + i915_ttm_memcpy_release(arg); + dma_fence_put(&copy_work->fence); +} + +static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + struct i915_ttm_memcpy_work *copy_work = + container_of(cb, typeof(*copy_work), cb); + + if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) { + INIT_WORK(&copy_work->work, __memcpy_work); + queue_work(system_unbound_wq, &copy_work->work); + } else { + init_irq_work(&copy_work->irq_work, __memcpy_irq_work); + irq_work_queue(&copy_work->irq_work); + } +} + +static const char *get_driver_name(struct dma_fence *fence) +{ + return "i915_ttm_memcpy_work"; +} + +static const char *get_timeline_name(struct dma_fence *fence) +{ + return "unbound"; +} + +static const struct dma_fence_ops dma_fence_memcpy_ops = { + .get_driver_name = get_driver_name, + .get_timeline_name = get_timeline_name, +}; + +static struct dma_fence * +i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work, + struct dma_fence *dep) +{ + int ret; + + spin_lock_init(&work->lock); + dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0); + dma_fence_get(&work->fence); + ret = dma_fence_add_callback(dep, &work->cb,
__memcpy_cb); + if (ret) { + if (ret != -ENOENT) + dma_fence_wait(dep, false); + + return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL : + dep->error); + } + + return &work->fence; +} + +static struct dma_fence * +__i915_ttm_move(struct ttm_buffer_object *bo, bool clear, + struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, + struct i915_refct_sgt *dst_rsgt, bool allow_accel, + struct dma_fence *move_dep) +{ + struct i915_ttm_memcpy_work *copy_work = NULL; + struct i915_ttm_memcpy_arg _arg, *arg = &_arg; + struct dma_fence *fence = ERR_PTR(-EINVAL); + + if (allow_accel) { + fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, + &dst_rsgt->table, move_dep); + + /* + * We only need to intercept the error when moving to lmem. + * When moving to system, TTM or shmem will provide us with + * cleared pages. + */ + if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) && + !I915_SELFTEST_ONLY(fail_gpu_migration || + fail_work_allocation)) + goto out; + } + + /* If we've scheduled gpu migration, try to arm the error intercept. */ + if (!IS_ERR(fence)) { + struct dma_fence *dep = fence; + + if (!I915_SELFTEST_ONLY(fail_work_allocation)) + copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL); + + if (copy_work) { + arg = &copy_work->arg; + i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm, + dst_rsgt); + fence = i915_ttm_memcpy_work_arm(copy_work, dep); + } else { + dma_fence_wait(dep, false); + fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? + -EINVAL : fence->error); + } + dma_fence_put(dep); + + if (!IS_ERR(fence)) + goto out; + } else if (move_dep) { + int err = dma_fence_wait(move_dep, true); + + if (err) + return ERR_PTR(err); + } + + /* Error intercept failed or no accelerated migration to start with */ + if (!copy_work) + i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm, + dst_rsgt); + i915_ttm_move_memcpy(arg); + i915_ttm_memcpy_release(arg); + kfree(copy_work); + + return NULL; +out: + if (!fence && copy_work) { + i915_ttm_memcpy_release(arg); + kfree(copy_work); + } + + return fence; +} + +static struct dma_fence *prev_fence(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx) +{ + struct i915_deps deps; + int ret; + + /* + * Instead of trying hard with GFP_KERNEL to allocate memory, + * the dependency collection will just sync if it doesn't + * succeed. + */ + i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + ret = i915_deps_add_dependency(&deps, bo->moving, ctx); + if (!ret) + /* + * TODO: Only await excl fence here, and shared fences before + * signaling the migration fence. + */ + ret = i915_deps_add_resv(&deps, bo->base.resv, true, false, ctx); + if (ret) + return ERR_PTR(ret); + + return i915_deps_to_fence(&deps, ctx); +} + +/** + * i915_ttm_move - The TTM move callback used by i915. + * @bo: The buffer object. + * @evict: Whether this is an eviction. + * @ctx: The ttm operation context. + * @dst_mem: The destination ttm resource. + * @hop: If we need multihop, what temporary memory type to move to. + * + * Return: 0 if successful, negative error code otherwise.
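+ * + * As an orientation aid only (a sketch of the implementation below, not a + * contract): i915_ttm_move_notify() first drops GPU bindings and the pages + * sg-table, prev_fence() coalesces outstanding dependency fences, + * __i915_ttm_move() schedules the GPU blit with the memcpy error intercept + * armed, and ttm_bo_move_accel_cleanup() or ttm_bo_move_sync_cleanup() + * hands the result back to TTM.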
+ */ +int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *dst_mem, + struct ttm_place *hop) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct ttm_resource_manager *dst_man = + ttm_manager_type(bo->bdev, dst_mem->mem_type); + struct dma_fence *migration_fence = NULL; + struct ttm_tt *ttm = bo->ttm; + struct i915_refct_sgt *dst_rsgt; + bool clear; + int ret; + + if (GEM_WARN_ON(!obj)) { + ttm_bo_move_null(bo, dst_mem); + return 0; + } + + ret = i915_ttm_move_notify(bo); + if (ret) + return ret; + + if (obj->mm.madv != I915_MADV_WILLNEED) { + i915_ttm_purge(obj); + ttm_resource_free(bo, &dst_mem); + return 0; + } + + /* Populate ttm with pages if needed. Typically system memory. */ + if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { + ret = ttm_tt_populate(bo->bdev, ttm, ctx); + if (ret) + return ret; + } + + dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem); + if (IS_ERR(dst_rsgt)) + return PTR_ERR(dst_rsgt); + + clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); + if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) { + struct dma_fence *dep = prev_fence(bo, ctx); + + if (IS_ERR(dep)) { + i915_refct_sgt_put(dst_rsgt); + return PTR_ERR(dep); + } + + migration_fence = __i915_ttm_move(bo, clear, dst_mem, bo->ttm, + dst_rsgt, true, dep); + dma_fence_put(dep); + } + + /* We can possibly get an -ERESTARTSYS here */ + if (IS_ERR(migration_fence)) { + i915_refct_sgt_put(dst_rsgt); + return PTR_ERR(migration_fence); + } + + if (migration_fence) { + ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict, + true, dst_mem); + if (ret) { + dma_fence_wait(migration_fence, false); + ttm_bo_move_sync_cleanup(bo, dst_mem); + } + dma_fence_put(migration_fence); + } else { + ttm_bo_move_sync_cleanup(bo, dst_mem); + } + + i915_ttm_adjust_domains_after_move(obj); + i915_ttm_free_cached_io_rsgt(obj); + + if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) { + obj->ttm.cached_io_rsgt = dst_rsgt; + obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl; + obj->ttm.get_io_page.sg_idx = 0; + } else { + i915_refct_sgt_put(dst_rsgt); + } + + i915_ttm_adjust_lru(obj); + i915_ttm_adjust_gem_after_move(obj); + return 0; +} + +/** + * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to + * another + * @dst: The destination object + * @src: The source object + * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used. + * @intr: Whether to perform waits interruptibly. + * + * Note: The caller is responsible for ensuring that the underlying + * TTM objects are populated if needed and locked. + * + * Return: Zero on success. Negative error code on error. If @intr == true, + * then it may return -ERESTARTSYS or -EINTR. + */ +int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, + struct drm_i915_gem_object *src, + bool allow_accel, bool intr) +{ + struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst); + struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src); + struct ttm_operation_ctx ctx = { + .interruptible = intr, + }; + struct i915_refct_sgt *dst_rsgt; + struct dma_fence *copy_fence, *dep_fence; + struct i915_deps deps; + int ret, shared_err; + + assert_object_held(dst); + assert_object_held(src); + i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + + /* + * We plan to add a shared fence only for the source.
If that + * fails, we await all source fences before commencing + * the copy instead of only the exclusive fence. + */ + shared_err = dma_resv_reserve_shared(src_bo->base.resv, 1); + ret = i915_deps_add_resv(&deps, dst_bo->base.resv, true, false, &ctx); + if (!ret) + ret = i915_deps_add_resv(&deps, src_bo->base.resv, + !!shared_err, false, &ctx); + if (ret) + return ret; + + dep_fence = i915_deps_to_fence(&deps, &ctx); + if (IS_ERR(dep_fence)) + return PTR_ERR(dep_fence); + + dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource); + copy_fence = __i915_ttm_move(src_bo, false, dst_bo->resource, + dst_bo->ttm, dst_rsgt, allow_accel, + dep_fence); + + i915_refct_sgt_put(dst_rsgt); + if (IS_ERR_OR_NULL(copy_fence)) + return PTR_ERR_OR_ZERO(copy_fence); + + dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence); + + /* If we failed to reserve a shared slot, add an exclusive fence */ + if (shared_err) + dma_resv_add_excl_fence(src_bo->base.resv, copy_fence); + else + dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); + + dma_fence_put(copy_fence); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h new file mode 100644 index 000000000000..d2e7f149e05c --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef _I915_GEM_TTM_MOVE_H_ +#define _I915_GEM_TTM_MOVE_H_ + +#include <linux/types.h> + +#include "i915_selftest.h" + +struct ttm_buffer_object; +struct ttm_operation_ctx; +struct ttm_place; +struct ttm_resource; +struct ttm_tt; + +struct drm_i915_gem_object; +struct i915_refct_sgt; + +int i915_ttm_move_notify(struct ttm_buffer_object *bo); + +I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_failure_modes(bool gpu_migration, + bool work_allocation)); + +int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, + struct drm_i915_gem_object *src, + bool allow_accel, bool intr); + +/* Internal I915 TTM declarations and definitions below. */ + +int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *dst_mem, + struct ttm_place *hop); + +void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj); + +void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c new file mode 100644 index 000000000000..9aad84059d56 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_tt.h> + +#include "i915_drv.h" +#include "intel_memory_region.h" +#include "intel_region_ttm.h" + +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" +#include "gem/i915_gem_ttm_pm.h" + +/** + * i915_ttm_backup_free - Free any backup attached to this object + * @obj: The object whose backup is to be freed. + */ +void i915_ttm_backup_free(struct drm_i915_gem_object *obj) +{ + if (obj->ttm.backup) { + i915_gem_object_put(obj->ttm.backup); + obj->ttm.backup = NULL; + } +} + +/** + * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup and restore + * @base: The i915_gem_apply_to_region we derive from. + * @allow_gpu: Whether using the gpu blitter is allowed. + * @backup_pinned: On backup, also back up pinned objects.
+ */ +struct i915_gem_ttm_pm_apply { + struct i915_gem_apply_to_region base; + bool allow_gpu : 1; + bool backup_pinned : 1; +}; + +static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, + struct drm_i915_gem_object *obj) +{ + struct i915_gem_ttm_pm_apply *pm_apply = + container_of(apply, typeof(*pm_apply), base); + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct ttm_buffer_object *backup_bo; + struct drm_i915_private *i915 = + container_of(bo->bdev, typeof(*i915), bdev); + struct drm_i915_gem_object *backup; + struct ttm_operation_ctx ctx = {}; + int err = 0; + + if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup) + return 0; + + if (pm_apply->allow_gpu && i915_gem_object_evictable(obj)) + return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx); + + if (!pm_apply->backup_pinned || + (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY))) + return 0; + + if (obj->flags & I915_BO_ALLOC_PM_VOLATILE) + return 0; + + backup = i915_gem_object_create_shmem(i915, obj->base.size); + if (IS_ERR(backup)) + return PTR_ERR(backup); + + err = i915_gem_object_lock(backup, apply->ww); + if (err) + goto out_no_lock; + + backup_bo = i915_gem_to_ttm(backup); + err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); + if (err) + goto out_no_populate; + + err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false); + GEM_WARN_ON(err); + ttm_bo_wait_ctx(backup_bo, &ctx); + + obj->ttm.backup = backup; + return 0; + +out_no_populate: + i915_gem_ww_unlock_single(backup); +out_no_lock: + i915_gem_object_put(backup); + + return err; +} + +static int i915_ttm_recover(struct i915_gem_apply_to_region *apply, + struct drm_i915_gem_object *obj) +{ + i915_ttm_backup_free(obj); + return 0; +} + +/** + * i915_ttm_recover_region - Free the backup of all objects of a region + * @mr: The memory region + * + * Checks all objects of a region for an attached backup, and if so frees + * that backup. Typically this is called to recover after a partially + * performed backup. + */ +void i915_ttm_recover_region(struct intel_memory_region *mr) +{ + static const struct i915_gem_apply_to_region_ops recover_ops = { + .process_obj = i915_ttm_recover, + }; + struct i915_gem_apply_to_region apply = {.ops = &recover_ops}; + int ret; + + ret = i915_gem_process_region(mr, &apply); + GEM_WARN_ON(ret); +} + +/** + * i915_ttm_backup_region - Back up all objects of a region to smem. + * @mr: The memory region + * @flags: TTM backup flags. I915_TTM_BACKUP_ALLOW_GPU allows using the gpu + * blitter for this backup; I915_TTM_BACKUP_PINNED also backs up pinned + * objects. + * + * Loops over all objects of a region and either evicts them if they are + * evictable or backs them up using a backup object if they are pinned. + * + * Return: Zero on success. Negative error code on error.
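+ * + * A minimal suspend-time usage sketch, with error handling elided: + * + *	ret = i915_ttm_backup_region(mr, I915_TTM_BACKUP_ALLOW_GPU | + *				     I915_TTM_BACKUP_PINNED); + *	if (ret) + *		i915_ttm_recover_region(mr);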
+ */ +int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags) +{ + static const struct i915_gem_apply_to_region_ops backup_ops = { + .process_obj = i915_ttm_backup, + }; + struct i915_gem_ttm_pm_apply pm_apply = { + .base = {.ops = &backup_ops}, + .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU, + .backup_pinned = flags & I915_TTM_BACKUP_PINNED, + }; + + return i915_gem_process_region(mr, &pm_apply.base); +} + +static int i915_ttm_restore(struct i915_gem_apply_to_region *apply, + struct drm_i915_gem_object *obj) +{ + struct i915_gem_ttm_pm_apply *pm_apply = + container_of(apply, typeof(*pm_apply), base); + struct drm_i915_gem_object *backup = obj->ttm.backup; + struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup); + struct ttm_operation_ctx ctx = {}; + int err; + + if (!backup) + return 0; + + if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY)) + return 0; + + err = i915_gem_object_lock(backup, apply->ww); + if (err) + return err; + + /* Content may have been swapped. */ + err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); + if (!err) { + err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu, + false); + GEM_WARN_ON(err); + ttm_bo_wait_ctx(backup_bo, &ctx); + + obj->ttm.backup = NULL; + err = 0; + } + + i915_gem_ww_unlock_single(backup); + + if (!err) + i915_gem_object_put(backup); + + return err; +} + +/** + * i915_ttm_restore_region - Restore backed-up objects of a region from smem. + * @mr: The memory region + * @flags: TTM backup flags. I915_TTM_BACKUP_ALLOW_GPU allows using the gpu + * blitter for the restore. + * + * Loops over all objects of a region and if they are backed up, restores + * them from smem. + * + * Return: Zero on success. Negative error code on error. + */ +int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags) +{ + static const struct i915_gem_apply_to_region_ops restore_ops = { + .process_obj = i915_ttm_restore, + }; + struct i915_gem_ttm_pm_apply pm_apply = { + .base = {.ops = &restore_ops}, + .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU, + }; + + return i915_gem_process_region(mr, &pm_apply.base); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h new file mode 100644 index 000000000000..25ed67a31571 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _I915_GEM_TTM_PM_H_ +#define _I915_GEM_TTM_PM_H_ + +#include <linux/types.h> + +struct intel_memory_region; +struct drm_i915_gem_object; + +#define I915_TTM_BACKUP_ALLOW_GPU BIT(0) +#define I915_TTM_BACKUP_PINNED BIT(1) + +int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags); + +void i915_ttm_recover_region(struct intel_memory_region *mr); + +int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags); + +/* Internal I915 TTM functions below.
*/ +void i915_ttm_backup_free(struct drm_i915_gem_object *obj); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 8ea0fa665e53..3173c9f9a040 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -165,8 +165,11 @@ alloc_table: goto err; } - sg_page_sizes = i915_sg_dma_sizes(st->sgl); + WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)); + if (i915_gem_object_can_bypass_llc(obj)) + obj->cache_dirty = true; + sg_page_sizes = i915_sg_dma_sizes(st->sgl); __i915_gem_object_set_pages(obj, st, sg_page_sizes); return 0; @@ -546,7 +549,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENOMEM; drm_gem_private_object_init(dev, &obj->base, args->user_size); - i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0); + i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, + I915_BO_ALLOC_USER); obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE; obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index f909aaa09d9c..dab3d30c09a0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -10,7 +10,6 @@ #include "gt/intel_engine.h" -#include "dma_resv_utils.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" @@ -25,7 +24,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence, return timeout; if (dma_fence_is_i915(fence)) - return i915_request_wait(to_request(fence), flags, timeout); + return i915_request_wait_timeout(to_request(fence), flags, timeout); return dma_fence_wait_timeout(fence, flags & I915_WAIT_INTERRUPTIBLE, @@ -37,58 +36,22 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, unsigned int flags, long timeout) { - struct dma_fence *excl; - bool prune_fences = false; - - if (flags & I915_WAIT_ALL) { - struct dma_fence **shared; - unsigned int count, i; - int ret; - - ret = dma_resv_get_fences(resv, &excl, &count, &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - timeout = i915_gem_object_wait_fence(shared[i], - flags, timeout); - if (timeout < 0) - break; - - dma_fence_put(shared[i]); - } - - for (; i < count; i++) - dma_fence_put(shared[i]); - kfree(shared); + struct dma_resv_iter cursor; + struct dma_fence *fence; + long ret = timeout ?: 1; + + dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + ret = i915_gem_object_wait_fence(fence, flags, timeout); + if (ret <= 0) + break; - /* - * If both shared fences and an exclusive fence exist, - * then by construction the shared fences must be later - * than the exclusive fence. If we successfully wait for - * all the shared fences, we know that the exclusive fence - * must all be signaled. If all the shared fences are - * signaled, we can prune the array and recover the - * floating references on the fences/requests. - */ - prune_fences = count && timeout >= 0; - } else { - excl = dma_resv_get_excl_unlocked(resv); + if (timeout) + timeout = ret; } + dma_resv_iter_end(&cursor); - if (excl && timeout >= 0) - timeout = i915_gem_object_wait_fence(excl, flags, timeout); - - dma_fence_put(excl); - - /* - * Opportunistically prune the fences iff we know they have *all* been - * signaled. 
- */ - if (prune_fences) - dma_resv_prune(resv); - - return timeout; + return ret; } static void fence_set_priority(struct dma_fence *fence, @@ -151,32 +114,13 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr) { - struct dma_fence *excl; + struct dma_resv_iter cursor; + struct dma_fence *fence; - if (flags & I915_WAIT_ALL) { - struct dma_fence **shared; - unsigned int count, i; - int ret; - - ret = dma_resv_get_fences(obj->base.resv, &excl, &count, - &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - i915_gem_fence_wait_priority(shared[i], attr); - dma_fence_put(shared[i]); - } - - kfree(shared); - } else { - excl = dma_resv_get_excl_unlocked(obj->base.resv); - } - - if (excl) { - i915_gem_fence_wait_priority(excl, attr); - dma_fence_put(excl); - } + dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL); + dma_resv_for_each_fence_unlocked(&cursor, fence) + i915_gem_fence_wait_priority(fence, attr); + dma_resv_iter_end(&cursor); return 0; } @@ -196,7 +140,11 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj, timeout = i915_gem_object_wait_reservation(obj->base.resv, flags, timeout); - return timeout < 0 ? timeout : 0; + + if (timeout < 0) + return timeout; + + return !timeout ? -ETIME : 0; } static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) @@ -306,6 +254,6 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, unsigned int flags) { might_sleep(); - /* NOP for now. */ - return 0; + + return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE)); } diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c index 5e6e8c91ab38..7271fbf813fa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.c +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c @@ -6,15 +6,16 @@ #include <linux/fs.h> #include <linux/mount.h> -#include <linux/pagemap.h> #include "i915_drv.h" #include "i915_gemfs.h" int i915_gemfs_init(struct drm_i915_private *i915) { + char huge_opt[] = "huge=within_size"; /* r/w */ struct file_system_type *type; struct vfsmount *gemfs; + char *opts; type = get_fs_type("tmpfs"); if (!type) @@ -26,10 +27,24 @@ int i915_gemfs_init(struct drm_i915_private *i915) * * One example, although it is probably better with a per-file * control, is selecting huge page allocations ("huge=within_size"). - * Currently unused due to bandwidth issues (slow reads) on Broadwell+. + * However, we only do so to offset the overhead of iommu lookups + * due to bandwidth issues (slow reads) on Broadwell+. 
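+ * The kernel-internal mount below is roughly equivalent to the userspace + * mount "mount -t tmpfs -o huge=within_size tmpfs <dir>"; see + * Documentation/admin-guide/mm/transhuge.rst for the tmpfs huge page + * options.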
*/ - gemfs = kern_mount(type); + opts = NULL; + if (intel_vtd_active(i915)) { + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { + opts = huge_opt; + drm_info(&i915->drm, + "Transparent Hugepage mode '%s'\n", + opts); + } else { + drm_notice(&i915->drm, + "Transparent Hugepage support is recommended for optimal performance when IOMMU is enabled!\n"); + } + } + + gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, opts); if (IS_ERR(gemfs)) return PTR_ERR(gemfs); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index a094f3ce1a90..c69c7d45aabc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -22,6 +22,22 @@ #include "selftests/mock_region.h" #include "selftests/i915_random.h" +static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915, + struct file *file) +{ + struct i915_gem_context *ctx = live_context(i915, file); + struct i915_address_space *vm; + + if (IS_ERR(ctx)) + return ctx; + + vm = ctx->vm; + if (vm) + WRITE_ONCE(vm->scrub_64K, true); + + return ctx; +} + static const unsigned int page_sizes[] = { I915_GTT_PAGE_SIZE_2M, I915_GTT_PAGE_SIZE_64K, @@ -136,6 +152,8 @@ static void put_huge_pages(struct drm_i915_gem_object *obj, huge_pages_free_pages(pages); obj->mm.dirty = false; + + __start_cpu_write(obj); } static const struct drm_i915_gem_object_ops huge_page_ops = { @@ -152,6 +170,7 @@ huge_pages_object(struct drm_i915_private *i915, { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; + unsigned int cache_level; GEM_BUG_ON(!size); GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask)))); @@ -173,7 +192,9 @@ huge_pages_object(struct drm_i915_private *i915, obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->cache_level = I915_CACHE_NONE; + + cache_level = HAS_LLC(i915) ? 
I915_CACHE_LLC : I915_CACHE_NONE; + i915_gem_object_set_cache_coherency(obj, cache_level); obj->mm.page_mask = page_mask; @@ -547,7 +568,7 @@ out_unpin: out_put: i915_gem_object_put(obj); out_region: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -954,6 +975,8 @@ static int igt_mock_ppgtt_64K(void *arg) __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); i915_gem_object_put(obj); + + i915_gem_drain_freed_objects(i915); } } @@ -1075,10 +1098,6 @@ static int __igt_write_huge(struct intel_context *ce, if (IS_ERR(vma)) return PTR_ERR(vma); - err = i915_vma_unbind(vma); - if (err) - return err; - err = i915_vma_pin(vma, size, 0, flags | offset); if (err) { /* @@ -1112,7 +1131,7 @@ out_vma_unpin: return err; } -static int igt_write_huge(struct i915_gem_context *ctx, +static int igt_write_huge(struct drm_i915_private *i915, struct drm_i915_gem_object *obj) { struct i915_gem_engines *engines; @@ -1122,6 +1141,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, IGT_TIMEOUT(end_time); unsigned int max_page_size; unsigned int count; + struct i915_gem_context *ctx; + struct file *file; u64 max; u64 num; u64 size; @@ -1129,6 +1150,16 @@ static int igt_write_huge(struct i915_gem_context *ctx, int i, n; int err = 0; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); size = obj->base.size; @@ -1148,7 +1179,7 @@ static int igt_write_huge(struct i915_gem_context *ctx, } i915_gem_context_unlock_engines(ctx); if (!n) - return 0; + goto out; /* * To keep things interesting when alternating between engines in our @@ -1210,6 +1241,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, kfree(order); +out: + fput(file); return err; } @@ -1272,8 +1305,7 @@ static u32 igt_random_size(struct rnd_state *prng, static int igt_ppgtt_smoke_huge(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; I915_RND_STATE(prng); struct { @@ -1297,6 +1329,7 @@ static int igt_ppgtt_smoke_huge(void *arg) u32 min = backends[i].min; u32 max = backends[i].max; u32 size = max; + try_again: size = igt_random_size(&prng, min, rounddown_pow_of_two(size)); @@ -1331,7 +1364,7 @@ try_again: goto out_unpin; } - err = igt_write_huge(ctx, obj); + err = igt_write_huge(i915, obj); if (err) { pr_err("%s write-huge failed with size=%u, i=%d\n", __func__, size, i); @@ -1358,8 +1391,7 @@ out_put: static int igt_ppgtt_sanity_check(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; unsigned int supported = INTEL_INFO(i915)->page_sizes; struct { igt_create_fn fn; @@ -1426,7 +1458,7 @@ static int igt_ppgtt_sanity_check(void *arg) if (pages) obj->mm.page_sizes.sg = pages; - err = igt_write_huge(ctx, obj); + err = igt_write_huge(i915, obj); i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); @@ -1453,15 +1485,27 @@ out: static int igt_tmpfs_fallback(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; + struct i915_address_space *vm; + struct i915_gem_context *ctx; struct vfsmount *gemfs = i915->mm.gemfs; - struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx); struct drm_i915_gem_object *obj; struct i915_vma *vma; + struct file 
*file; u32 *vaddr; int err = 0; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + vm = i915_gem_context_get_eb_vm(ctx); + /* * Make sure that we don't burst into a ball of flames upon falling back * to tmpfs, which we rely on if on the off-chance we encounter a failure @@ -1505,32 +1549,47 @@ out_restore: i915->mm.gemfs = gemfs; i915_vm_put(vm); +out: + fput(file); return err; } static int igt_shrink_thp(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx); + struct drm_i915_private *i915 = arg; + struct i915_address_space *vm; + struct i915_gem_context *ctx; struct drm_i915_gem_object *obj; struct i915_gem_engines_iter it; struct intel_context *ce; struct i915_vma *vma; + struct file *file; unsigned int flags = PIN_USER; unsigned int n; - int err = 0; + bool should_swap; + int err; + + if (!igt_can_allocate_thp(i915)) { + pr_info("missing THP support, skipping\n"); + return 0; + } + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + vm = i915_gem_context_get_eb_vm(ctx); /* * Sanity check shrinking huge-paged object -- make sure nothing blows * up. */ - if (!igt_can_allocate_thp(i915)) { - pr_info("missing THP support, skipping\n"); - goto out_vm; - } - obj = i915_gem_object_create_shmem(i915, SZ_2M); if (IS_ERR(obj)) { err = PTR_ERR(obj); @@ -1567,23 +1626,39 @@ static int igt_shrink_thp(void *arg) break; } i915_gem_context_unlock_engines(ctx); + /* + * Nuke everything *before* we unpin the pages so we can be reasonably + * sure that when later checking get_nr_swap_pages() some random + * leftover object doesn't steal the remaining swap space. + */ + i915_gem_shrink(NULL, i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE); i915_vma_unpin(vma); if (err) goto out_put; /* - * Now that the pages are *unpinned* shrink-all should invoke - * shmem to truncate our pages. + * Now that the pages are *unpinned* shrinking should invoke + * shmem to truncate our pages, if we have available swap.
*/ - i915_gem_shrink_all(i915); - if (i915_gem_object_has_pages(obj)) { - pr_err("shrink-all didn't truncate the pages\n"); + should_swap = get_nr_swap_pages() > 0; + i915_gem_shrink(NULL, i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE | + I915_SHRINK_WRITEBACK); + if (should_swap == i915_gem_object_has_pages(obj)) { + pr_err("unexpected pages mismatch, should_swap=%s\n", + yesno(should_swap)); err = -EINVAL; goto out_put; } - if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) { - pr_err("residual page-size bits left\n"); + if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) { + pr_err("unexpected residual page-size bits, should_swap=%s\n", + yesno(should_swap)); err = -EINVAL; goto out_put; } @@ -1604,7 +1679,8 @@ out_put: i915_gem_object_put(obj); out_vm: i915_vm_put(vm); - +out: + fput(file); return err; } @@ -1629,7 +1705,7 @@ int i915_gem_huge_page_mock_selftests(void) mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; mkwrite_device_info(dev_priv)->ppgtt_size = 48; - ppgtt = i915_ppgtt_create(&dev_priv->gt); + ppgtt = i915_ppgtt_create(&dev_priv->gt, 0); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; @@ -1665,10 +1741,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ppgtt_smoke_huge), SUBTEST(igt_ppgtt_sanity_check), }; - struct i915_gem_context *ctx; - struct i915_address_space *vm; - struct file *file; - int err; if (!HAS_PPGTT(i915)) { pr_info("PPGTT not supported, skipping live-selftests\n"); @@ -1678,25 +1750,5 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_file; - } - - mutex_lock(&ctx->mutex); - vm = i915_gem_context_vm(ctx); - if (vm) - WRITE_ONCE(vm->scrub_64K, true); - mutex_unlock(&ctx->mutex); - - err = i915_subtests(tests, ctx); - -out_file: - fput(file); - return err; + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index ecbcbb86ae1e..8402ed925a69 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -17,13 +17,20 @@ #include "huge_gem_object.h" #include "mock_context.h" +enum client_tiling { + CLIENT_TILING_LINEAR, + CLIENT_TILING_X, + CLIENT_TILING_Y, + CLIENT_NUM_TILING_TYPES +}; + #define WIDTH 512 #define HEIGHT 32 struct blit_buffer { struct i915_vma *vma; u32 start_val; - u32 tiling; + enum client_tiling tiling; }; struct tiled_blits { @@ -53,9 +60,9 @@ static int prepare_blit(const struct tiled_blits *t, *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(BCS_SWCTRL); cmd = (BCS_SRC_Y | BCS_DST_Y) << 16; - if (src->tiling == I915_TILING_Y) + if (src->tiling == CLIENT_TILING_Y) cmd |= BCS_SRC_Y; - if (dst->tiling == I915_TILING_Y) + if (dst->tiling == CLIENT_TILING_Y) cmd |= BCS_DST_Y; *cs++ = cmd; @@ -172,7 +179,7 @@ static int tiled_blits_create_buffers(struct tiled_blits *t, t->buffers[i].vma = vma; t->buffers[i].tiling = - i915_prandom_u32_max_state(I915_TILING_Y + 1, prng); + i915_prandom_u32_max_state(CLIENT_TILING_Y + 1, prng); } return 0; @@ -197,17 +204,17 @@ static u64 swizzle_bit(unsigned int bit, u64 offset) static u64 tiled_offset(const struct intel_gt *gt, u64 v, unsigned int stride, - 
unsigned int tiling) + enum client_tiling tiling) { unsigned int swizzle; u64 x, y; - if (tiling == I915_TILING_NONE) + if (tiling == CLIENT_TILING_LINEAR) return v; y = div64_u64_rem(v, stride, &x); - if (tiling == I915_TILING_X) { + if (tiling == CLIENT_TILING_X) { v = div64_u64_rem(y, 8, &y) * stride * 8; v += y * 512; v += div64_u64_rem(x, 512, &x) << 12; @@ -244,12 +251,12 @@ static u64 tiled_offset(const struct intel_gt *gt, return v; } -static const char *repr_tiling(int tiling) +static const char *repr_tiling(enum client_tiling tiling) { switch (tiling) { - case I915_TILING_NONE: return "linear"; - case I915_TILING_X: return "X"; - case I915_TILING_Y: return "Y"; + case CLIENT_TILING_LINEAR: return "linear"; + case CLIENT_TILING_X: return "X"; + case CLIENT_TILING_Y: return "Y"; default: return "unknown"; } } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 8eb5050f8cb3..21b71568cd5f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -27,12 +27,6 @@ #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) -static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx) -{ - /* single threaded, private ctx */ - return rcu_dereference_protected(ctx->vm, true); -} - static int live_nop_switch(void *arg) { const unsigned int nctx = 1024; @@ -94,7 +88,7 @@ static int live_nop_switch(void *arg) rq = i915_request_get(this); i915_request_add(this); } - if (i915_request_wait(rq, 0, HZ / 5) < 0) { + if (i915_request_wait(rq, 0, 10 * HZ) < 0) { pr_err("Failed to populated %d contexts\n", nctx); intel_gt_set_wedged(&i915->gt); i915_request_put(rq); @@ -704,7 +698,7 @@ static int igt_ctx_exec(void *arg) pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, - yesno(!!rcu_access_pointer(ctx->vm)), + yesno(i915_gem_context_has_full_ppgtt(ctx)), err); intel_context_put(ce); kernel_context_close(ctx); @@ -813,7 +807,7 @@ static int igt_shared_ctx_exec(void *arg) struct i915_gem_context *ctx; struct intel_context *ce; - ctx = kernel_context(i915, ctx_vm(parent)); + ctx = kernel_context(i915, parent->vm); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_test; @@ -823,7 +817,7 @@ static int igt_shared_ctx_exec(void *arg) GEM_BUG_ON(IS_ERR(ce)); if (!obj) { - obj = create_test_object(ctx_vm(parent), + obj = create_test_object(parent->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); @@ -838,7 +832,7 @@ static int igt_shared_ctx_exec(void *arg) pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, - yesno(!!rcu_access_pointer(ctx->vm)), + yesno(i915_gem_context_has_full_ppgtt(ctx)), err); intel_context_put(ce); kernel_context_close(ctx); @@ -1380,7 +1374,7 @@ static int igt_ctx_readonly(void *arg) goto out_file; } - vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm; + vm = ctx->vm ?: &i915->ggtt.alias->vm; if (!vm || !vm->has_read_only) { err = 0; goto out_file; @@ -1417,7 +1411,7 @@ static int igt_ctx_readonly(void *arg) pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), ce->engine->name, - yesno(!!ctx_vm(ctx)), + yesno(i915_gem_context_has_full_ppgtt(ctx)), err); i915_gem_context_unlock_engines(ctx); goto out_file; @@ -1499,7 +1493,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); - err = check_scratch(ctx_vm(ctx), offset); + err = check_scratch(ctx->vm, offset); if (err) return err; @@ -1528,7 +1522,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, intel_gt_chipset_flush(engine->gt); - vm = i915_gem_context_get_vm_rcu(ctx); + vm = i915_gem_context_get_eb_vm(ctx); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -1596,7 +1590,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); - err = check_scratch(ctx_vm(ctx), offset); + err = check_scratch(ctx->vm, offset); if (err) return err; @@ -1607,7 +1601,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, if (GRAPHICS_VER(i915) >= 8) { const u32 GPR0 = engine->mmio_base + 0x600; - vm = i915_gem_context_get_vm_rcu(ctx); + vm = i915_gem_context_get_eb_vm(ctx); vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -1739,7 +1733,7 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out) u32 *vaddr; int err = 0; - vm = ctx_vm(ctx); + vm = ctx->vm; if (!vm) return -ENODEV; @@ -1801,7 +1795,7 @@ static int igt_vm_isolation(void *arg) } /* We can only test vm isolation, if the vm are distinct */ - if (ctx_vm(ctx_a) == ctx_vm(ctx_b)) + if (ctx_a->vm == ctx_b->vm) goto out_file; /* Read the initial state of the scratch page */ @@ -1813,8 +1807,8 @@ static int igt_vm_isolation(void *arg) if (err) goto out_file; - vm_total = ctx_vm(ctx_a)->total; - GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total); + vm_total = ctx_a->vm->total; + GEM_BUG_ON(ctx_b->vm->total != vm_total); count = 0; num_engines = 0; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index ffae7df5e4d7..3cc74b0fed06 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg) err = PTR_ERR(import); goto out_dmabuf; } + import_obj = to_intel_bo(import); if (import != &obj->base) { pr_err("i915_gem_prime_import created a new object!\n"); err = -EINVAL; goto out_import; } - import_obj = to_intel_bo(import); i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); @@ -102,7 +102,7 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg) obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", - PTR_ERR(dmabuf)); + PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } @@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg) pr_err("i915_gem_prime_import failed with the wrong err=%ld\n", PTR_ERR(import)); err = PTR_ERR(import); + } else { + err = 0; } dma_buf_put(dmabuf); @@ -156,7 +158,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, regions, num_regions); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", - PTR_ERR(dmabuf)); + PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } @@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, err = PTR_ERR(import); goto out_dmabuf; } + import_obj = 
to_intel_bo(import); if (import == &obj->base) { pr_err("i915_gem_prime_import reused gem object!\n"); @@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, goto out_import; } - import_obj = to_intel_bo(import); - i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); if (err) { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c deleted file mode 100644 index 16162fc2782d..000000000000 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2020 Intel Corporation - */ - -#include "i915_selftest.h" - -#include "gt/intel_engine_pm.h" -#include "selftests/igt_flush_test.h" - -static u64 read_reloc(const u32 *map, int x, const u64 mask) -{ - u64 reloc; - - memcpy(&reloc, &map[x], sizeof(reloc)); - return reloc & mask; -} - -static int __igt_gpu_reloc(struct i915_execbuffer *eb, - struct drm_i915_gem_object *obj) -{ - const unsigned int offsets[] = { 8, 3, 0 }; - const u64 mask = - GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0); - const u32 *map = page_mask_bits(obj->mm.mapping); - struct i915_request *rq; - struct i915_vma *vma; - int err; - int i; - - vma = i915_vma_instance(obj, eb->context->vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_gem_object_lock(obj, &eb->ww); - if (err) - return err; - - err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH); - if (err) - return err; - - /* 8-Byte aligned */ - err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0); - if (err <= 0) - goto reloc_err; - - /* !8-Byte aligned */ - err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1); - if (err <= 0) - goto reloc_err; - - /* Skip to the end of the cmd page */ - i = PAGE_SIZE / sizeof(u32) - 1; - i -= eb->reloc_cache.rq_size; - memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size, - MI_NOOP, i); - eb->reloc_cache.rq_size += i; - - /* Force next batch */ - err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2); - if (err <= 0) - goto reloc_err; - - GEM_BUG_ON(!eb->reloc_cache.rq); - rq = i915_request_get(eb->reloc_cache.rq); - reloc_gpu_flush(eb, &eb->reloc_cache); - GEM_BUG_ON(eb->reloc_cache.rq); - - err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2); - if (err) { - intel_gt_set_wedged(eb->engine->gt); - goto put_rq; - } - - if (!i915_request_completed(rq)) { - pr_err("%s: did not wait for relocations!\n", eb->engine->name); - err = -EINVAL; - goto put_rq; - } - - for (i = 0; i < ARRAY_SIZE(offsets); i++) { - u64 reloc = read_reloc(map, offsets[i], mask); - - if (reloc != i) { - pr_err("%s[%d]: map[%d] %llx != %x\n", - eb->engine->name, i, offsets[i], reloc, i); - err = -EINVAL; - } - } - if (err) - igt_hexdump(map, 4096); - -put_rq: - i915_request_put(rq); -unpin_vma: - i915_vma_unpin(vma); - return err; - -reloc_err: - if (!err) - err = -EIO; - goto unpin_vma; -} - -static int igt_gpu_reloc(void *arg) -{ - struct i915_execbuffer eb; - struct drm_i915_gem_object *scratch; - int err = 0; - u32 *map; - - eb.i915 = arg; - - scratch = i915_gem_object_create_internal(eb.i915, 4096); - if (IS_ERR(scratch)) - return PTR_ERR(scratch); - - map = i915_gem_object_pin_map_unlocked(scratch, I915_MAP_WC); - if (IS_ERR(map)) { - err = PTR_ERR(map); - goto err_scratch; - } - - intel_gt_pm_get(&eb.i915->gt); - - for_each_uabi_engine(eb.engine, eb.i915) { - if (intel_engine_requires_cmd_parser(eb.engine) 
|| - intel_engine_using_cmd_parser(eb.engine)) - continue; - - reloc_cache_init(&eb.reloc_cache, eb.i915); - memset(map, POISON_INUSE, 4096); - - intel_engine_pm_get(eb.engine); - eb.context = intel_context_create(eb.engine); - if (IS_ERR(eb.context)) { - err = PTR_ERR(eb.context); - goto err_pm; - } - eb.reloc_pool = NULL; - eb.reloc_context = NULL; - - i915_gem_ww_ctx_init(&eb.ww, false); -retry: - err = intel_context_pin_ww(eb.context, &eb.ww); - if (!err) { - err = __igt_gpu_reloc(&eb, scratch); - - intel_context_unpin(eb.context); - } - if (err == -EDEADLK) { - err = i915_gem_ww_ctx_backoff(&eb.ww); - if (!err) - goto retry; - } - i915_gem_ww_ctx_fini(&eb.ww); - - if (eb.reloc_pool) - intel_gt_buffer_pool_put(eb.reloc_pool); - if (eb.reloc_context) - intel_context_put(eb.reloc_context); - - intel_context_put(eb.context); -err_pm: - intel_engine_pm_put(eb.engine); - if (err) - break; - } - - if (igt_flush_test(eb.i915)) - err = -EIO; - - intel_gt_pm_put(&eb.i915->gt); -err_scratch: - i915_gem_object_put(scratch); - return err; -} - -int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_gpu_reloc), - }; - - if (intel_gt_is_wedged(&i915->gt)) - return 0; - - return i915_live_subtests(tests, i915); -} diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index 28a700f08b49..4b8e6b098659 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -4,6 +4,7 @@ */ #include "gt/intel_migrate.h" +#include "gem/i915_gem_ttm_move.h" static int igt_fill_check_buffer(struct drm_i915_gem_object *obj, bool fill) @@ -227,13 +228,34 @@ out_put: return err; } +static int igt_lmem_pages_failsafe_migrate(void *arg) +{ + int fail_gpu, fail_alloc, ret; + + for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) { + for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) { + pr_info("Simulated failure modes: gpu: %d, alloc: %d\n", + fail_gpu, fail_alloc); + i915_ttm_migrate_set_failure_modes(fail_gpu, + fail_alloc); + ret = igt_lmem_pages_migrate(arg); + if (ret) + goto out_err; + } + } + +out_err: + i915_ttm_migrate_set_failure_modes(false, false); + return ret; +} + int i915_gem_migrate_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_smem_create_migrate), SUBTEST(igt_lmem_create_migrate), SUBTEST(igt_same_create_migrate), - SUBTEST(igt_lmem_pages_migrate), + SUBTEST(igt_lmem_pages_failsafe_migrate), }; if (!HAS_LMEM(i915)) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index b20f5621f62b..6d30cdfa80f3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915) return I915_MMAP_TYPE_GTT; } +static struct drm_i915_gem_object * +create_sys_or_internal(struct drm_i915_private *i915, + unsigned long size) +{ + if (HAS_LMEM(i915)) { + struct intel_memory_region *sys_region = + i915->mm.regions[INTEL_REGION_SMEM]; + + return __i915_gem_object_create_user(i915, size, &sys_region, 1); + } + + return i915_gem_object_create_internal(i915, size); +} + static bool assert_mmap_offset(struct drm_i915_private *i915, unsigned long size, int expected) @@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, u64 offset; 
int ret; - obj = i915_gem_object_create_internal(i915, size); + obj = create_sys_or_internal(i915, size); if (IS_ERR(obj)) return expected && expected == PTR_ERR(obj); @@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg) struct drm_mm_node *hole, *next; int loop, err = 0; u64 offset; + int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC; /* Disable background reaper */ disable_retire_worker(i915); @@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg) } /* Too large */ - if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) { + if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n"); err = -EINVAL; goto out; } /* Fill the hole, further allocation attempts should then fail */ - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = create_sys_or_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("Unable to create object for reclaimed hole\n"); @@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg) goto err_obj; } - if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) { + if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting object into no holes!\n"); err = -EINVAL; goto err_obj; @@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj) static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); bool no_map; - if (HAS_LMEM(i915)) + if (obj->ops->mmap_offset) return type == I915_MMAP_TYPE_FIXED; else if (type == I915_MMAP_TYPE_FIXED) return false; @@ -889,7 +903,9 @@ static int __igt_mmap(struct drm_i915_private *i915, pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr); + mmap_read_lock(current->mm); area = vma_lookup(current->mm, addr); + mmap_read_unlock(current->mm); if (!area) { pr_err("%s: Did not create a vm_area_struct for the mmap\n", obj->mm.region->name); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index fee070df1c97..c0a8ef368044 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -23,6 +23,7 @@ mock_context(struct drm_i915_private *i915, kref_init(&ctx->ref); INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; + INIT_WORK(&ctx->release_work, i915_gem_context_release_work); mutex_init(&ctx->mutex); @@ -87,7 +88,7 @@ live_context(struct drm_i915_private *i915, struct file *file) return ERR_CAST(pc); ctx = i915_gem_create_context(i915, pc); - proto_context_close(pc); + proto_context_close(i915, pc); if (IS_ERR(ctx)) return ctx; @@ -162,7 +163,7 @@ kernel_context(struct drm_i915_private *i915, } ctx = i915_gem_create_context(i915, pc); - proto_context_close(pc); + proto_context_close(i915, pc); if (IS_ERR(ctx)) return ctx; diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h deleted file mode 100644 index f69257eaa1cc..000000000000 --- a/drivers/gpu/drm/i915/gt/debugfs_engines.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2019 Intel Corporation - */ - -#ifndef DEBUGFS_ENGINES_H -#define DEBUGFS_ENGINES_H - -struct intel_gt; -struct dentry; - -void debugfs_engines_register(struct intel_gt *gt, struct dentry *root); - -#endif /* DEBUGFS_ENGINES_H */ diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c deleted file mode 
100644 index 591eb60785db..000000000000 --- a/drivers/gpu/drm/i915/gt/debugfs_gt.c +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright © 2019 Intel Corporation - */ - -#include <linux/debugfs.h> - -#include "debugfs_engines.h" -#include "debugfs_gt.h" -#include "debugfs_gt_pm.h" -#include "intel_sseu_debugfs.h" -#include "uc/intel_uc_debugfs.h" -#include "i915_drv.h" - -void debugfs_gt_register(struct intel_gt *gt) -{ - struct dentry *root; - - if (!gt->i915->drm.primary->debugfs_root) - return; - - root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root); - if (IS_ERR(root)) - return; - - debugfs_engines_register(gt, root); - debugfs_gt_pm_register(gt, root); - intel_sseu_debugfs_register(gt, root); - - intel_uc_debugfs_register(&gt->uc, root); -} - -void intel_gt_debugfs_register_files(struct dentry *root, - const struct debugfs_gt_file *files, - unsigned long count, void *data) -{ - while (count--) { - umode_t mode = files->fops->write ? 0644 : 0444; - - if (!files->eval || files->eval(data)) - debugfs_create_file(files->name, - mode, root, data, - files->fops); - - files++; - } -} diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h deleted file mode 100644 index 4cf5f5c9da7d..000000000000 --- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2019 Intel Corporation - */ - -#ifndef DEBUGFS_GT_PM_H -#define DEBUGFS_GT_PM_H - -struct intel_gt; -struct dentry; - -void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root); - -#endif /* DEBUGFS_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 1aee5e6b1b23..4a166d25fe60 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -185,7 +185,6 @@ static void gen6_alloc_va_range(struct i915_address_space *vm, pt = stash->pt[0]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill32_px(pt, vm->scratch[0]->encode); @@ -262,13 +261,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - __i915_vma_put(ppgtt->vma); - gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); mutex_destroy(&ppgtt->flush); - mutex_destroy(&ppgtt->pin_mutex); free_pd(&ppgtt->base.vm, ppgtt->base.pd); } @@ -331,37 +327,6 @@ static const struct i915_vma_ops pd_vma_ops = { .unbind_vma = pd_vma_unbind, }; -static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) -{ - struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; - struct i915_vma *vma; - - GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(size > ggtt->vm.total); - - vma = i915_vma_alloc(); - if (!vma) - return ERR_PTR(-ENOMEM); - - i915_active_init(&vma->active, NULL, NULL, 0); - - kref_init(&vma->ref); - mutex_init(&vma->pages_mutex); - vma->vm = i915_vm_get(&ggtt->vm); - vma->ops = &pd_vma_ops; - vma->private = ppgtt; - - vma->size = size; - vma->fence_size = size; - atomic_set(&vma->flags, I915_VMA_GGTT); - vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ - - INIT_LIST_HEAD(&vma->obj_link); - INIT_LIST_HEAD(&vma->closed_link); - - return vma; -} - int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); @@ -378,42 +343,92 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) if (atomic_add_unless(&ppgtt->pin_count, 1, 
0)) return 0; - if (mutex_lock_interruptible(&ppgtt->pin_mutex)) - return -EINTR; + /* grab the ppgtt resv to pin the object */ + err = i915_vm_lock_objects(&ppgtt->base.vm, ww); + if (err) + return err; /* * PPGTT PDEs reside in the GGTT and consists of 512 entries. The * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. */ - err = 0; - if (!atomic_read(&ppgtt->pin_count)) + if (!atomic_read(&ppgtt->pin_count)) { err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH); + + GEM_BUG_ON(ppgtt->vma->fence); + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma)); + } if (!err) atomic_inc(&ppgtt->pin_count); - mutex_unlock(&ppgtt->pin_mutex); return err; } -void gen6_ppgtt_unpin(struct i915_ppgtt *base) +static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + obj->mm.pages = ZERO_SIZE_PTR; + return 0; +} - GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); - if (atomic_dec_and_test(&ppgtt->pin_count)) - i915_vma_unpin(ppgtt->vma); +static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ } -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) +static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = { + .name = "pd_dummy_obj", + .get_pages = pd_dummy_obj_get_pages, + .put_pages = pd_dummy_obj_put_pages, +}; + +static struct i915_page_directory * +gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt; + struct i915_page_directory *pd; + int err; - if (!atomic_read(&ppgtt->pin_count)) - return; + pd = __alloc_pd(I915_PDES); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); - i915_vma_unpin(ppgtt->vma); - atomic_set(&ppgtt->pin_count, 0); + pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915, + &pd_dummy_obj_ops, + I915_PDES * SZ_4K); + if (IS_ERR(pd->pt.base)) { + err = PTR_ERR(pd->pt.base); + pd->pt.base = NULL; + goto err_pd; + } + + pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm); + pd->pt.base->shares_resv_from = &ppgtt->base.vm; + + ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL); + if (IS_ERR(ppgtt->vma)) { + err = PTR_ERR(ppgtt->vma); + ppgtt->vma = NULL; + goto err_pd; + } + + /* The dummy object we create is special, override ops.. 
*/ + ppgtt->vma->ops = &pd_vma_ops; + ppgtt->vma->private = ppgtt; + return pd; + +err_pd: + free_pd(&ppgtt->base.vm, pd); + return ERR_PTR(err); +} + +void gen6_ppgtt_unpin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); } struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) @@ -427,9 +442,8 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) return ERR_PTR(-ENOMEM); mutex_init(&ppgtt->flush); - mutex_init(&ppgtt->pin_mutex); - ppgtt_init(&ppgtt->base, gt); + ppgtt_init(&ppgtt->base, gt, 0); ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t)); ppgtt->base.vm.top = 1; @@ -442,19 +456,13 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma; ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->base.pd = __alloc_pd(I915_PDES); - if (!ppgtt->base.pd) { - err = -ENOMEM; - goto err_free; - } - err = gen6_ppgtt_init_scratch(ppgtt); if (err) - goto err_pd; + goto err_free; - ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); - if (IS_ERR(ppgtt->vma)) { - err = PTR_ERR(ppgtt->vma); + ppgtt->base.pd = gen6_alloc_top_pd(ppgtt); + if (IS_ERR(ppgtt->base.pd)) { + err = PTR_ERR(ppgtt->base.pd); goto err_scratch; } @@ -462,10 +470,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) err_scratch: free_scratch(&ppgtt->base.vm); -err_pd: - free_pd(&ppgtt->base.vm, ppgtt->base.pd); err_free: - mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h index 6a61a5c3a85a..5e5cf2ec3309 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h @@ -19,7 +19,6 @@ struct gen6_ppgtt { u32 pp_dir; atomic_t pin_count; - struct mutex pin_mutex; bool scan_for_unused_pt; }; @@ -71,7 +70,6 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww); void gen6_ppgtt_unpin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); void gen6_ppgtt_enable(struct intel_gt *gt); void gen7_ppgtt_enable(struct intel_gt *gt); struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 461844dffd7e..e320610dd0b8 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) vf_flush_wa = true; /* WaForGAMHang:kbl */ - if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0)) dc_flush_wa = true; } diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 6e0e52eeb87a..95c02096a61b 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -18,7 +18,7 @@ static u64 gen8_pde_encode(const dma_addr_t addr, const enum i915_cache_level level) { - u64 pde = addr | _PAGE_PRESENT | _PAGE_RW; + u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE; @@ -32,10 +32,10 @@ static u64 gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if 
(unlikely(flags & PTE_READ_ONLY)) - pte &= ~_PAGE_RW; + pte &= ~GEN8_PAGE_RW; if (flags & PTE_LM) pte |= GEN12_PPGTT_PTE_LM; @@ -301,7 +301,6 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, pt = stash->pt[!!lvl]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill_px(pt, vm->scratch[lvl]->encode); @@ -548,6 +547,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, I915_GTT_PAGE_SIZE_2M)))) { vaddr = px_vaddr(pd); vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; + clflush_cache_range(vaddr, PAGE_SIZE); page_size = I915_GTT_PAGE_SIZE_64K; /* @@ -568,6 +568,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma, for (i = 1; i < index; i += 16) memset64(vaddr + i, encode, 15); + clflush_cache_range(vaddr, PAGE_SIZE); } } @@ -650,7 +651,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) vm->scratch[0]->encode = gen8_pte_encode(px_dma(vm->scratch[0]), - I915_CACHE_LLC, pte_flags); + I915_CACHE_NONE, pte_flags); for (i = 1; i <= vm->top; i++) { struct drm_i915_gem_object *obj; @@ -666,7 +667,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) } fill_px(obj, vm->scratch[i - 1]->encode); - obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC); + obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE); vm->scratch[i] = obj; } @@ -751,7 +752,8 @@ err_pd: * space. * */ -struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt) +struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, + unsigned long lmem_pt_obj_flags) { struct i915_ppgtt *ppgtt; int err; @@ -760,7 +762,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt) if (!ppgtt) return ERR_PTR(-ENOMEM); - ppgtt_init(ppgtt, gt); + ppgtt_init(ppgtt, gt, lmem_pt_obj_flags); ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 
3 : 2; ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t)); diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h index b9028c2ad3c7..f541d19264b4 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h @@ -12,7 +12,9 @@ struct i915_address_space; struct intel_gt; enum i915_cache_level; -struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt); +struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt, + unsigned long lmem_pt_obj_flags); + u64 gen8_ggtt_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags); diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 745e84c72c90..ba083d800a08 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -219,7 +219,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce, */ err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww); - if (!err && ce->ring->vma->obj) + if (!err) err = i915_gem_object_lock(ce->ring->vma->obj, ww); if (!err && ce->state) err = i915_gem_object_lock(ce->state->obj, ww); @@ -228,17 +228,19 @@ int __intel_context_do_pin_ww(struct intel_context *ce, if (err) return err; - err = i915_active_acquire(&ce->active); + err = ce->ops->pre_pin(ce, ww, &vaddr); if (err) goto err_ctx_unpin; - err = ce->ops->pre_pin(ce, ww, &vaddr); + err = i915_active_acquire(&ce->active); if (err) - goto err_release; + goto err_post_unpin; err = mutex_lock_interruptible(&ce->pin_mutex); if (err) - goto err_post_unpin; + goto err_release; + + intel_engine_pm_might_get(ce->engine); if (unlikely(intel_context_is_closed(ce))) { err = -ENOENT; @@ -271,11 +273,11 @@ int __intel_context_do_pin_ww(struct intel_context *ce, err_unlock: mutex_unlock(&ce->pin_mutex); +err_release: + i915_active_release(&ce->active); err_post_unpin: if (!handoff) ce->ops->post_unpin(ce); -err_release: - i915_active_release(&ce->active); err_ctx_unpin: intel_context_post_unpin(ce); @@ -362,8 +364,9 @@ static int __intel_context_active(struct i915_active *active) return 0; } -static int sw_fence_dummy_notify(struct i915_sw_fence *sf, - enum i915_sw_fence_notify state) +static int +sw_fence_dummy_notify(struct i915_sw_fence *sf, + enum i915_sw_fence_notify state) { return NOTIFY_DONE; } @@ -394,19 +397,22 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) spin_lock_init(&ce->guc_state.lock); INIT_LIST_HEAD(&ce->guc_state.fences); + INIT_LIST_HEAD(&ce->guc_state.requests); + + ce->guc_id.id = GUC_INVALID_LRC_ID; + INIT_LIST_HEAD(&ce->guc_id.link); - spin_lock_init(&ce->guc_active.lock); - INIT_LIST_HEAD(&ce->guc_active.requests); + INIT_LIST_HEAD(&ce->destroyed_link); - ce->guc_id = GUC_INVALID_LRC_ID; - INIT_LIST_HEAD(&ce->guc_id_link); + INIT_LIST_HEAD(&ce->parallel.child_list); /* * Initialize fence to be complete as this is expected to be complete * unless there is a pending schedule disable outstanding. 
*/ - i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify); - i915_sw_fence_commit(&ce->guc_blocked); + i915_sw_fence_init(&ce->guc_state.blocked, + sw_fence_dummy_notify); + i915_sw_fence_commit(&ce->guc_state.blocked); i915_active_init(&ce->active, __intel_context_active, __intel_context_retire, 0); @@ -414,12 +420,20 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) void intel_context_fini(struct intel_context *ce) { + struct intel_context *child, *next; + if (ce->timeline) intel_timeline_put(ce->timeline); i915_vm_put(ce->vm); + /* Need to put the creation ref for the children */ + if (intel_context_is_parent(ce)) + for_each_child_safe(ce, child, next) + intel_context_put(child); + mutex_destroy(&ce->pin_mutex); i915_active_fini(&ce->active); + i915_sw_fence_fini(&ce->guc_state.blocked); } void i915_context_module_exit(void) @@ -515,24 +529,53 @@ retry: struct i915_request *intel_context_find_active_request(struct intel_context *ce) { + struct intel_context *parent = intel_context_to_parent(ce); struct i915_request *rq, *active = NULL; unsigned long flags; GEM_BUG_ON(!intel_engine_uses_guc(ce->engine)); - spin_lock_irqsave(&ce->guc_active.lock, flags); - list_for_each_entry_reverse(rq, &ce->guc_active.requests, + /* + * We search the parent list to find an active request on the submitted + * context. The parent list contains the requests for all the contexts + * in the relationship so we have to do a compare of each request's + * context. + */ + spin_lock_irqsave(&parent->guc_state.lock, flags); + list_for_each_entry_reverse(rq, &parent->guc_state.requests, sched.link) { + if (rq->context != ce) + continue; if (i915_request_completed(rq)) break; active = rq; } - spin_unlock_irqrestore(&parent->guc_state.lock, flags); return active; } +void intel_context_bind_parent_child(struct intel_context *parent, + struct intel_context *child) +{ + /* + * It is the caller's responsibility to validate that this function is + * used correctly, but we use GEM_BUG_ON here to ensure that they do. 
+ */ + GEM_BUG_ON(!intel_engine_uses_guc(parent->engine)); + GEM_BUG_ON(intel_context_is_pinned(parent)); + GEM_BUG_ON(intel_context_is_child(parent)); + GEM_BUG_ON(intel_context_is_pinned(child)); + GEM_BUG_ON(intel_context_is_child(child)); + GEM_BUG_ON(intel_context_is_parent(child)); + + parent->parallel.child_index = parent->parallel.number_children++; + list_add_tail(&child->parallel.child_link, + &parent->parallel.child_list); + child->parallel.parent = parent; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_context.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index c41098950746..246c37d72cd7 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -44,6 +44,54 @@ void intel_context_free(struct intel_context *ce); int intel_context_reconfigure_sseu(struct intel_context *ce, const struct intel_sseu sseu); +#define PARENT_SCRATCH_SIZE PAGE_SIZE + +static inline bool intel_context_is_child(struct intel_context *ce) +{ + return !!ce->parallel.parent; +} + +static inline bool intel_context_is_parent(struct intel_context *ce) +{ + return !!ce->parallel.number_children; +} + +static inline bool intel_context_is_pinned(struct intel_context *ce); + +static inline struct intel_context * +intel_context_to_parent(struct intel_context *ce) +{ + if (intel_context_is_child(ce)) { + /* + * The parent holds ref count to the child so it is always safe + * for the parent to access the child, but the child has a + * pointer to the parent without a ref. To ensure this is safe + * the child should only access the parent pointer while the + * parent is pinned. + */ + GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent)); + + return ce->parallel.parent; + } else { + return ce; + } +} + +static inline bool intel_context_is_parallel(struct intel_context *ce) +{ + return intel_context_is_child(ce) || intel_context_is_parent(ce); +} + +void intel_context_bind_parent_child(struct intel_context *parent, + struct intel_context *child); + +#define for_each_child(parent, ce)\ + list_for_each_entry(ce, &(parent)->parallel.child_list,\ + parallel.child_link) +#define for_each_child_safe(parent, ce, cn)\ + list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\ + parallel.child_link) + /** * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context * @ce - the context @@ -193,7 +241,13 @@ intel_context_timeline_lock(struct intel_context *ce) struct intel_timeline *tl = ce->timeline; int err; - err = mutex_lock_interruptible(&tl->mutex); + if (intel_context_is_parent(ce)) + err = mutex_lock_interruptible_nested(&tl->mutex, 0); + else if (intel_context_is_child(ce)) + err = mutex_lock_interruptible_nested(&tl->mutex, + ce->parallel.child_index + 1); + else + err = mutex_lock_interruptible(&tl->mutex); if (err) return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index e54351a170e2..9e0177dc5484 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -55,9 +55,13 @@ struct intel_context_ops { void (*reset)(struct intel_context *ce); void (*destroy)(struct kref *kref); - /* virtual engine/context interface */ + /* virtual/parallel engine/context interface */ struct intel_context *(*create_virtual)(struct intel_engine_cs **engine, - unsigned int count); + unsigned int count, + unsigned long flags); + struct intel_context 
*(*create_parallel)(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width); struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine, unsigned int sibling); }; @@ -112,6 +116,8 @@ struct intel_context { #define CONTEXT_FORCE_SINGLE_SUBMISSION 7 #define CONTEXT_NOPREEMPT 8 #define CONTEXT_LRCA_DIRTY 9 +#define CONTEXT_GUC_INIT 10 +#define CONTEXT_PERMA_PIN 11 struct { u64 timeout_us; @@ -152,52 +158,141 @@ struct intel_context { /** sseu: Control eu/slice partitioning */ struct intel_sseu sseu; + /** + * pinned_contexts_link: List link for the engine's pinned contexts. + * This is only used if this is a perma-pinned kernel context and + * the list is assumed to only be manipulated during driver load + * or unload time so no mutex protection currently. + */ + struct list_head pinned_contexts_link; + u8 wa_bb_page; /* if set, page num reserved for context workarounds */ struct { - /** lock: protects everything in guc_state */ + /** @lock: protects everything in guc_state */ spinlock_t lock; /** - * sched_state: scheduling state of this context using GuC + * @sched_state: scheduling state of this context using GuC * submission */ - u16 sched_state; + u32 sched_state; /* - * fences: maintains of list of requests that have a submit - * fence related to GuC submission + * @fences: maintains a list of requests that are currently + * being fenced until a GuC operation completes */ struct list_head fences; + /** + * @blocked: fence used to signal when the blocking of a + * context's submissions is complete. + */ + struct i915_sw_fence blocked; + /** @number_committed_requests: number of committed requests */ + int number_committed_requests; + /** @requests: list of active requests on this context */ + struct list_head requests; + /** @prio: the context's current guc priority */ + u8 prio; + /** + * @prio_count: a counter of the number requests in flight in + * each priority bucket + */ + u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; } guc_state; struct { - /** lock: protects everything in guc_active */ - spinlock_t lock; - /** requests: active requests on this context */ - struct list_head requests; - } guc_active; - - /* GuC scheduling state flags that do not require a lock. 
*/ - atomic_t guc_sched_state_no_lock; - - /* GuC LRC descriptor ID */ - u16 guc_id; + /** + * @id: handle which is used to uniquely identify this context + * with the GuC, protected by guc->submission_state.lock + */ + u16 id; + /** + * @ref: the number of references to the guc_id, when + * transitioning in and out of zero protected by + * guc->submission_state.lock + */ + atomic_t ref; + /** + * @link: in guc->guc_id_list when the guc_id has no refs but is + * still valid, protected by guc->submission_state.lock + */ + struct list_head link; + } guc_id; - /* GuC LRC descriptor reference count */ - atomic_t guc_id_ref; + /** + * @destroyed_link: link in guc->submission_state.destroyed_contexts, in + * list when context is pending to be destroyed (deregistered with the + * GuC), protected by guc->submission_state.lock + */ + struct list_head destroyed_link; - /* - * GuC ID link - in list when unpinned but guc_id still valid in GuC + /** @parallel: sub-structure for parallel submission members */ + struct { + union { + /** + * @child_list: parent's list of children + * contexts, no protection as immutable after context + * creation + */ + struct list_head child_list; + /** + * @child_link: child's link into parent's list of + * children + */ + struct list_head child_link; + }; + /** @parent: pointer to parent if child */ + struct intel_context *parent; + /** + * @last_rq: last request submitted on a parallel context, used + * to insert submit fences between requests in the parallel + * context + */ + struct i915_request *last_rq; + /** + * @fence_context: fence context composite fence when doing + * parallel submission + */ + u64 fence_context; + /** + * @seqno: seqno for composite fence when doing parallel + * submission + */ + u32 seqno; + /** @number_children: number of children if parent */ + u8 number_children; + /** @child_index: index into child_list if child */ + u8 child_index; + /** @guc: GuC specific members for parallel submission */ + struct { + /** @wqi_head: head pointer in work queue */ + u16 wqi_head; + /** @wqi_tail: tail pointer in work queue */ + u16 wqi_tail; + /** + * @parent_page: page in context state (ce->state) used + * by parent for work queue, process descriptor + */ + u8 parent_page; + } guc; + } parallel; + +#ifdef CONFIG_DRM_I915_SELFTEST + /** + * @drop_schedule_enable: Force drop of schedule enable G2H for selftest */ - struct list_head guc_id_link; + bool drop_schedule_enable; - /* GuC context blocked fence */ - struct i915_sw_fence guc_blocked; + /** + * @drop_schedule_disable: Force drop of schedule disable G2H for + * selftest + */ + bool drop_schedule_disable; - /* - * GuC priority management + /** + * @drop_deregister: Force drop of deregister G2H for selftest */ - u8 guc_prio; - u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM]; + bool drop_deregister; +#endif }; #endif /* __INTEL_CONTEXT_TYPES__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 87579affb952..08559ace0ada 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -2,6 +2,7 @@ #ifndef _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_ +#include <asm/cacheflush.h> #include <drm/drm_util.h> #include <linux/hashtable.h> @@ -175,6 +176,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_SEQNO 0x40 #define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) #define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32)) +#define I915_GEM_HWS_PXP 0x60 +#define 
I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32)) #define I915_GEM_HWS_SCRATCH 0x80 #define I915_HWS_CSB_BUF0_INDEX 0x10 @@ -273,15 +276,25 @@ static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine) static inline bool intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) { - if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) return false; return intel_engine_has_preemption(engine); } +#define FORCE_VIRTUAL BIT(0) struct intel_context * intel_engine_create_virtual(struct intel_engine_cs **siblings, - unsigned int count); + unsigned int count, unsigned long flags); + +static inline struct intel_context * +intel_engine_create_parallel(struct intel_engine_cs **engines, + unsigned int num_engines, + unsigned int width) +{ + GEM_BUG_ON(!engines[0]->cops->create_parallel); + return engines[0]->cops->create_parallel(engines, num_engines, width); +} static inline bool intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine) @@ -300,7 +313,7 @@ intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine) static inline bool intel_engine_has_heartbeat(const struct intel_engine_cs *engine) { - if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) return false; if (intel_engine_is_virtual(engine)) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 0d9105a31d84..352254e001b4 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -290,7 +290,8 @@ static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir) GEM_DEBUG_WARN_ON(iir); } -static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) +static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, + u8 logical_instance) { const struct engine_info *info = &intel_engines[id]; struct drm_i915_private *i915 = gt->i915; @@ -320,9 +321,42 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); + INIT_LIST_HEAD(&engine->pinned_contexts_list); engine->id = id; engine->legacy_idx = INVALID_ENGINE; engine->mask = BIT(id); + if (GRAPHICS_VER(gt->i915) >= 11) { + static const u32 engine_reset_domains[] = { + [RCS0] = GEN11_GRDOM_RENDER, + [BCS0] = GEN11_GRDOM_BLT, + [VCS0] = GEN11_GRDOM_MEDIA, + [VCS1] = GEN11_GRDOM_MEDIA2, + [VCS2] = GEN11_GRDOM_MEDIA3, + [VCS3] = GEN11_GRDOM_MEDIA4, + [VCS4] = GEN11_GRDOM_MEDIA5, + [VCS5] = GEN11_GRDOM_MEDIA6, + [VCS6] = GEN11_GRDOM_MEDIA7, + [VCS7] = GEN11_GRDOM_MEDIA8, + [VECS0] = GEN11_GRDOM_VECS, + [VECS1] = GEN11_GRDOM_VECS2, + [VECS2] = GEN11_GRDOM_VECS3, + [VECS3] = GEN11_GRDOM_VECS4, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } else { + static const u32 engine_reset_domains[] = { + [RCS0] = GEN6_GRDOM_RENDER, + [BCS0] = GEN6_GRDOM_BLT, + [VCS0] = GEN6_GRDOM_MEDIA, + [VCS1] = GEN8_GRDOM_MEDIA2, + [VECS0] = GEN6_GRDOM_VECS, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } engine->i915 = i915; engine->gt = gt; engine->uncore = gt->uncore; @@ -334,6 +368,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) engine->class = info->class; engine->instance = info->instance; + engine->logical_mask = BIT(logical_instance); 
__sprint_engine_name(engine); engine->props.heartbeat_interval_ms = @@ -360,7 +395,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) DRIVER_CAPS(i915)->has_logical_contexts = true; ewma__engine_latency_init(&engine->latency); - seqcount_init(&engine->stats.lock); + seqcount_init(&engine->stats.execlists.lock); ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); @@ -398,7 +433,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) engine->uabi_capabilities |= I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) { - if (GRAPHICS_VER(i915) >= 9) + if (GRAPHICS_VER(i915) >= 9 && + engine->gt->info.sfc_mask & BIT(engine->instance)) engine->uabi_capabilities |= I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; } @@ -474,18 +510,25 @@ void intel_engines_free(struct intel_gt *gt) } static -bool gen11_vdbox_has_sfc(struct drm_i915_private *i915, +bool gen11_vdbox_has_sfc(struct intel_gt *gt, unsigned int physical_vdbox, unsigned int logical_vdbox, u16 vdbox_mask) { + struct drm_i915_private *i915 = gt->i915; + /* * In Gen11, only even numbered logical VDBOXes are hooked * up to an SFC (Scaler & Format Converter) unit. * In Gen12, Even numbered physical instance always are connected * to an SFC. Odd numbered physical instances have SFC only if * previous even instance is fused off. + * + * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field + * in the fuse register that tells us whether a specific SFC is present. */ - if (GRAPHICS_VER(i915) == 12) + if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0) + return false; + else if (GRAPHICS_VER(i915) == 12) return (physical_vdbox % 2 == 0) || !(BIT(physical_vdbox - 1) & vdbox_mask); else if (GRAPHICS_VER(i915) == 11) @@ -512,7 +555,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; unsigned int logical_vdbox = 0; unsigned int i; - u32 media_fuse; + u32 media_fuse, fuse1; u16 vdbox_mask; u16 vebox_mask; @@ -534,6 +577,13 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> GEN11_GT_VEBOX_DISABLE_SHIFT; + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1); + gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); + } else { + gt->info.sfc_mask = ~0; + } + for (i = 0; i < I915_MAX_VCS; i++) { if (!HAS_ENGINE(gt, _VCS(i))) { vdbox_mask &= ~BIT(i); @@ -546,7 +596,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) continue; } - if (gen11_vdbox_has_sfc(i915, i, logical_vdbox, vdbox_mask)) + if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask)) gt->info.vdbox_sfc_access |= BIT(i); logical_vdbox++; } @@ -572,6 +622,37 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) return info->engine_mask; } +static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids, + u8 class, const u8 *map, u8 num_instances) +{ + int i, j; + u8 current_logical_id = 0; + + for (j = 0; j < num_instances; ++j) { + for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) { + if (!HAS_ENGINE(gt, i) || + intel_engines[i].class != class) + continue; + + if (intel_engines[i].instance == map[j]) { + logical_ids[intel_engines[i].instance] = + current_logical_id++; + break; + } + } + } +} + +static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class) +{ + int i; + u8 map[MAX_ENGINE_INSTANCE + 1]; + + for (i = 0; i < MAX_ENGINE_INSTANCE 
+ 1; ++i) + map[i] = i; + populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map)); +} + /** * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers * @gt: pointer to struct intel_gt @@ -583,7 +664,8 @@ int intel_engines_init_mmio(struct intel_gt *gt) struct drm_i915_private *i915 = gt->i915; const unsigned int engine_mask = init_engine_mask(gt); unsigned int mask = 0; - unsigned int i; + unsigned int i, class; + u8 logical_ids[MAX_ENGINE_INSTANCE + 1]; int err; drm_WARN_ON(&i915->drm, engine_mask == 0); @@ -593,15 +675,23 @@ int intel_engines_init_mmio(struct intel_gt *gt) if (i915_inject_probe_failure(i915)) return -ENODEV; - for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { - if (!HAS_ENGINE(gt, i)) - continue; + for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) { + setup_logical_ids(gt, logical_ids, class); - err = intel_engine_setup(gt, i); - if (err) - goto cleanup; + for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) { + u8 instance = intel_engines[i].instance; + + if (intel_engines[i].class != class || + !HAS_ENGINE(gt, i)) + continue; - mask |= BIT(i); + err = intel_engine_setup(gt, i, + logical_ids[instance]); + if (err) + goto cleanup; + + mask |= BIT(i); + } } /* @@ -875,6 +965,8 @@ intel_engine_create_pinned_context(struct intel_engine_cs *engine, return ERR_PTR(err); } + list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list); + /* * Give our perma-pinned kernel timelines a separate lockdep class, * so that we can use them from within the normal user timelines @@ -897,6 +989,7 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce) list_del(&ce->timeline->engine_link); mutex_unlock(&hwsp->vm->mutex); + list_del(&ce->pinned_contexts_link); intel_context_unpin(ce); intel_context_put(ce); } @@ -1163,16 +1256,16 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine, u32 mmio_base = engine->mmio_base; int slice; int subslice; + int iter; memset(instdone, 0, sizeof(*instdone)); - switch (GRAPHICS_VER(i915)) { - default: + if (GRAPHICS_VER(i915) >= 8) { instdone->instdone = intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); if (engine->id != RCS0) - break; + return; instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); @@ -1182,21 +1275,39 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine, instdone->slice_common_extra[1] = intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2); } - for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { - instdone->sampler[slice][subslice] = - read_subslice_reg(engine, slice, subslice, - GEN7_SAMPLER_INSTDONE); - instdone->row[slice][subslice] = - read_subslice_reg(engine, slice, subslice, - GEN7_ROW_INSTDONE); + + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { + for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) { + instdone->sampler[slice][subslice] = + read_subslice_reg(engine, slice, subslice, + GEN7_SAMPLER_INSTDONE); + instdone->row[slice][subslice] = + read_subslice_reg(engine, slice, subslice, + GEN7_ROW_INSTDONE); + } + } else { + for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { + instdone->sampler[slice][subslice] = + read_subslice_reg(engine, slice, subslice, + GEN7_SAMPLER_INSTDONE); + instdone->row[slice][subslice] = + read_subslice_reg(engine, slice, subslice, + GEN7_ROW_INSTDONE); + } } - break; - case 7: + + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { + for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) + instdone->geom_svg[slice][subslice] = + 
read_subslice_reg(engine, slice, subslice, + XEHPG_INSTDONE_GEOM_SVG); + } + } else if (GRAPHICS_VER(i915) >= 7) { instdone->instdone = intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); if (engine->id != RCS0) - break; + return; instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); @@ -1204,22 +1315,15 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine, intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE); instdone->row[0][0] = intel_uncore_read(uncore, GEN7_ROW_INSTDONE); - - break; - case 6: - case 5: - case 4: + } else if (GRAPHICS_VER(i915) >= 4) { instdone->instdone = intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); if (engine->id == RCS0) /* HACK: Using the wrong struct member */ instdone->slice_common = intel_uncore_read(uncore, GEN4_INSTDONE1); - break; - case 3: - case 2: + } else { instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE); - break; } } @@ -1604,14 +1708,18 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, static void print_request_ring(struct drm_printer *m, struct i915_request *rq) { + struct i915_vma_snapshot *vsnap = &rq->batch_snapshot; void *ring; int size; + if (!i915_vma_snapshot_present(vsnap)) + vsnap = NULL; + drm_printf(m, "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", rq->head, rq->postfix, rq->tail, - rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, - rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u, + vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u); size = rq->tail - rq->head; if (rq->tail < rq->head) @@ -1843,22 +1951,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_engine_print_breadcrumbs(engine, m); } -static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, - ktime_t *now) -{ - ktime_t total = engine->stats.total; - - /* - * If the engine is executing something at the moment - * add it to the total. 
- */ - *now = ktime_get(); - if (READ_ONCE(engine->stats.active)) - total = ktime_add(total, ktime_sub(*now, engine->stats.start)); - - return total; -} - /** * intel_engine_get_busy_time() - Return current accumulated engine busyness * @engine: engine to report on @@ -1868,29 +1960,21 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, */ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) { - unsigned int seq; - ktime_t total; - - do { - seq = read_seqcount_begin(&engine->stats.lock); - total = __intel_engine_get_busy_time(engine, now); - } while (read_seqcount_retry(&engine->stats.lock, seq)); - - return total; + return engine->busyness(engine, now); } struct intel_context * intel_engine_create_virtual(struct intel_engine_cs **siblings, - unsigned int count) + unsigned int count, unsigned long flags) { if (count == 0) return ERR_PTR(-EINVAL); - if (count == 1) + if (count == 1 && !(flags & FORCE_VIRTUAL)) return intel_context_create(siblings[0]); GEM_BUG_ON(!siblings[0]->cops->create_virtual); - return siblings[0]->cops->create_virtual(siblings, count); + return siblings[0]->cops->create_virtual(siblings, count, flags); } struct i915_request * diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 74775ae961b2..a3698f611f45 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -207,7 +207,7 @@ out: void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine) { - if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) return; next_heartbeat(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 1f07ac4e0672..a1334b48dde7 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -162,6 +162,19 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) unsigned long flags; bool result = true; + /* + * This is execlist specific behaviour intended to ensure the GPU is + * idle by switching to a known 'safe' context. With GuC submission, the + * same idle guarantee is achieved by other means (disabling + * scheduling). Further, switching to a 'safe' context has no effect + * with GuC submission as the scheduler can just switch back again. + * + * FIXME: Move this backend scheduler specific behaviour into the + * scheduler backend. + */ + if (intel_engine_uses_guc(engine)) + return true; + /* GPU is pointing to the void, as good as in the kernel context. */ if (intel_gt_is_wedged(engine->gt)) return true; @@ -298,6 +311,29 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) intel_engine_init_heartbeat(engine); } +/** + * intel_engine_reset_pinned_contexts - Reset the pinned contexts of + * an engine. + * @engine: The engine whose pinned contexts we want to reset. + * + * Typically the pinned context LMEM images lose or get their content + * corrupted on suspend. This function resets their images. 
+ */ +void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + + list_for_each_entry(ce, &engine->pinned_contexts_list, + pinned_contexts_link) { + /* kernel context gets reset at __engine_unpark() */ + if (ce == engine->kernel_context) + continue; + + dbg_poison_ce(ce); + ce->ops->reset(ce); + } +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_engine_pm.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index 70ea46d6cfb0..d68675925b79 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -6,9 +6,11 @@ #ifndef INTEL_ENGINE_PM_H #define INTEL_ENGINE_PM_H +#include "i915_drv.h" #include "i915_request.h" #include "intel_engine_types.h" #include "intel_wakeref.h" +#include "intel_gt_pm.h" static inline bool intel_engine_pm_is_awake(const struct intel_engine_cs *engine) @@ -16,6 +18,11 @@ intel_engine_pm_is_awake(const struct intel_engine_cs *engine) return intel_wakeref_is_active(&engine->wakeref); } +static inline void __intel_engine_pm_get(struct intel_engine_cs *engine) +{ + __intel_wakeref_get(&engine->wakeref); +} + static inline void intel_engine_pm_get(struct intel_engine_cs *engine) { intel_wakeref_get(&engine->wakeref); @@ -26,6 +33,21 @@ static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine) return intel_wakeref_get_if_active(&engine->wakeref); } +static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine) +{ + if (!intel_engine_is_virtual(engine)) { + intel_wakeref_might_get(&engine->wakeref); + } else { + struct intel_gt *gt = engine->gt; + struct intel_engine_cs *tengine; + intel_engine_mask_t tmp, mask = engine->mask; + + for_each_engine_masked(tengine, gt, mask, tmp) + intel_wakeref_might_get(&tengine->wakeref); + } + intel_gt_pm_might_get(engine->gt); +} + static inline void intel_engine_pm_put(struct intel_engine_cs *engine) { intel_wakeref_put(&engine->wakeref); @@ -47,6 +69,21 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine) intel_wakeref_unlock_wait(&engine->wakeref); } +static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine) +{ + if (!intel_engine_is_virtual(engine)) { + intel_wakeref_might_put(&engine->wakeref); + } else { + struct intel_gt *gt = engine->gt; + struct intel_engine_cs *tengine; + intel_engine_mask_t tmp, mask = engine->mask; + + for_each_engine_masked(tengine, gt, mask, tmp) + intel_wakeref_might_put(&tengine->wakeref); + } + intel_gt_pm_might_put(engine->gt); +} + static inline struct i915_request * intel_engine_create_kernel_request(struct intel_engine_cs *engine) { @@ -69,4 +106,6 @@ intel_engine_create_kernel_request(struct intel_engine_cs *engine) void intel_engine_init__pm(struct intel_engine_cs *engine); +void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine); + #endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h index 24fbdd94351a..8e762d683e50 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h @@ -15,45 +15,46 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine) { + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - if (engine->stats.active) { - engine->stats.active++; + if (stats->active) { + stats->active++; return; } /* The writer is serialised; but the pmu 
reader may be from hardirq */ local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.start = ktime_get(); - engine->stats.active++; + stats->start = ktime_get(); + stats->active++; - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); - GEM_BUG_ON(!engine->stats.active); + GEM_BUG_ON(!stats->active); } static inline void intel_engine_context_out(struct intel_engine_cs *engine) { + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - GEM_BUG_ON(!engine->stats.active); - if (engine->stats.active > 1) { - engine->stats.active--; + GEM_BUG_ON(!stats->active); + if (stats->active > 1) { + stats->active--; return; } local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.active--; - engine->stats.total = - ktime_add(engine->stats.total, - ktime_sub(ktime_get(), engine->stats.start)); + stats->active--; + stats->total = ktime_add(stats->total, + ktime_sub(ktime_get(), stats->start)); - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index ed91bcff20eb..36365bdbe1ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -67,8 +67,11 @@ struct intel_instdone { /* The following exist only in the RCS engine */ u32 slice_common; u32 slice_common_extra[2]; - u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES]; - u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; + u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES]; + u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES]; + + /* Added in XeHPG */ + u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES]; }; /* @@ -254,6 +257,55 @@ struct intel_engine_execlists { #define INTEL_ENGINE_CS_MAX_NAME 8 +struct intel_engine_execlists_stats { + /** + * @active: Number of contexts currently scheduled in. + */ + unsigned int active; + + /** + * @lock: Lock protecting the below fields. + */ + seqcount_t lock; + + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases where + * engine is currently busy (active > 0). + */ + ktime_t total; + + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; +}; + +struct intel_engine_guc_stats { + /** + * @running: Active state of the engine when busyness was last sampled. + */ + bool running; + + /** + * @prev_total: Previous value of total runtime clock cycles. + */ + u32 prev_total; + + /** + * @total_gt_clks: Total gt clock cycles this engine was busy. + */ + u64 total_gt_clks; + + /** + * @start_gt_clk: GT clock time of last idle to active transition. + */ + u64 start_gt_clk; +}; + struct intel_engine_cs { struct drm_i915_private *i915; struct intel_gt *gt; @@ -266,6 +318,14 @@ struct intel_engine_cs { unsigned int guc_id; intel_engine_mask_t mask; + u32 reset_domain; + /** + * @logical_mask: logical mask of engine, reported to user space via + * query IOCTL and used to communicate with the GuC in logical space. + * The logical instance of a physical engine can change based on product + * and fusing. 
+ */ + intel_engine_mask_t logical_mask; u8 class; u8 instance; @@ -304,6 +364,13 @@ struct intel_engine_cs { struct intel_context *kernel_context; /* pinned */ + /** + * pinned_contexts_list: List of pinned contexts. This list is only + * assumed to be manipulated during driver load- or unload time and + * does therefore not have any additional protection. + */ + struct list_head pinned_contexts_list; + intel_engine_mask_t saturated; /* submitting semaphores too late? */ struct { @@ -422,6 +489,12 @@ struct intel_engine_cs { void (*add_active_request)(struct i915_request *rq); void (*remove_active_request)(struct i915_request *rq); + /* + * Get engine busyness and the time at which the busyness was sampled. + */ + ktime_t (*busyness)(struct intel_engine_cs *engine, + ktime_t *now); + struct intel_engine_execlists execlists; /* @@ -471,30 +544,10 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); struct { - /** - * @active: Number of contexts currently scheduled in. - */ - unsigned int active; - - /** - * @lock: Lock protecting the below fields. - */ - seqcount_t lock; - - /** - * @total: Total time this engine was busy. - * - * Accumulated time not counting the most recent block in cases - * where engine is currently busy (active > 0). - */ - ktime_t total; - - /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. - */ - ktime_t start; + union { + struct intel_engine_execlists_stats execlists; + struct intel_engine_guc_stats guc; + }; /** * @rps: Utilisation at last RPS sampling. @@ -546,7 +599,7 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine) static inline bool intel_engine_has_timeslices(const struct intel_engine_cs *engine) { - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + if (!CONFIG_DRM_I915_TIMESLICE_DURATION) return false; return engine->flags & I915_ENGINE_HAS_TIMESLICES; @@ -578,4 +631,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine) for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \ (instdone_has_subslice(dev_priv_, sseu_, slice_, \ subslice_))) + +#define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \ + for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \ + (iter_) < GEN_MAX_SUBSLICES; \ + (iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \ + (dss_) = (iter_) % GEN_DSS_PER_GSLICE) \ + for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_))) + #endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index de5f9c86b9a4..a69df5e9e77a 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -201,7 +201,8 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) } static struct intel_context * -execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count); +execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags); static struct i915_request * __active_request(const struct intel_timeline * const tl, @@ -2140,10 +2141,6 @@ static void __execlists_unhold(struct i915_request *rq) if (p->flags & I915_DEPENDENCY_WEAK) continue; - /* Propagate any change in error status */ - if (rq->fence.error) - i915_request_set_error_once(w, rq->fence.error); - if (w->engine != rq->engine) continue; @@ -2189,7 +2186,8 @@ struct execlists_capture { static 
void execlists_capture_work(struct work_struct *work) { struct execlists_capture *cap = container_of(work, typeof(*cap), work); - const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | + __GFP_NOWARN; struct intel_engine_cs *engine = cap->rq->engine; struct intel_gt_coredump *gt = cap->error->gt; struct intel_engine_capture_vma *vma; @@ -2565,7 +2563,7 @@ __execlists_context_pre_pin(struct intel_context *ce, if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) { lrc_init_state(ce, engine, *vaddr); - __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size); + __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size); } return 0; @@ -2791,6 +2789,8 @@ static void execlists_sanitize(struct intel_engine_cs *engine) /* And scrub the dirty cachelines for the HWSP */ clflush_cache_range(engine->status_page.addr, PAGE_SIZE); + + intel_engine_reset_pinned_contexts(engine); } static void enable_error_interrupt(struct intel_engine_cs *engine) @@ -3294,6 +3294,38 @@ static void execlists_release(struct intel_engine_cs *engine) lrc_fini_wa_ctx(engine); } +static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + ktime_t total = stats->total; + + /* + * If the engine is executing something at the moment + * add it to the total. + */ + *now = ktime_get(); + if (READ_ONCE(stats->active)) + total = ktime_add(total, ktime_sub(*now, stats->start)); + + return total; +} + +static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + unsigned int seq; + ktime_t total; + + do { + seq = read_seqcount_begin(&stats->lock); + total = __execlists_engine_busyness(engine, now); + } while (read_seqcount_retry(&stats->lock, seq)); + + return total; +} + static void logical_ring_default_vfuncs(struct intel_engine_cs *engine) { @@ -3341,7 +3373,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->flags |= I915_ENGINE_HAS_SEMAPHORES; if (can_preempt(engine)) { engine->flags |= I915_ENGINE_HAS_PREEMPTION; - if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + if (CONFIG_DRM_I915_TIMESLICE_DURATION) engine->flags |= I915_ENGINE_HAS_TIMESLICES; } } @@ -3350,6 +3382,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_bb_start = gen8_emit_bb_start; else engine->emit_bb_start = gen8_emit_bb_start_noarb; + + engine->busyness = execlists_engine_busyness; } static void logical_ring_default_irqs(struct intel_engine_cs *engine) @@ -3786,7 +3820,8 @@ unlock: } static struct intel_context * -execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count) +execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags) { struct virtual_engine *ve; unsigned int n; @@ -3879,6 +3914,7 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count) ve->siblings[ve->num_siblings++] = sibling; ve->base.mask |= sibling->mask; + ve->base.logical_mask |= sibling->logical_mask; /* * All physical engines must be compatible for their emission diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index de3ac58fceec..cbc6d2b1fd9e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -3,12 +3,14 @@ * Copyright © 2020 Intel Corporation */ 
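The engine busyness rework in the execlists hunks above moves the sampling fields into struct intel_engine_execlists_stats and reads them through execlists_engine_busyness(), whose read_seqcount_begin()/read_seqcount_retry() loop lets the PMU sample busyness without blocking the submission path. A minimal sketch of that read-side pattern, with illustrative names rather than the driver's own types:

/*
 * Read-side seqcount retry loop, as used by execlists_engine_busyness()
 * above. The writer brackets its updates with write_seqcount_begin()/
 * write_seqcount_end(); readers simply retry until they observe a
 * stable snapshot. 'struct busy_stats' and busy_stats_read() are
 * illustrative names only.
 */
#include <linux/ktime.h>
#include <linux/seqlock.h>

struct busy_stats {
	seqcount_t lock;
	unsigned int active;	/* contexts currently scheduled in */
	ktime_t total;		/* accumulated busy time */
	ktime_t start;		/* timestamp of last idle -> active */
};

static ktime_t busy_stats_read(struct busy_stats *stats, ktime_t *now)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&stats->lock);
		*now = ktime_get();
		total = stats->total;
		/* Engine currently busy: extend the open interval to *now. */
		if (READ_ONCE(stats->active))
			total = ktime_add(total, ktime_sub(*now, stats->start));
	} while (read_seqcount_retry(&stats->lock, seq));

	return total;
}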
+#include <linux/agp_backend.h> #include <linux/stop_machine.h> #include <asm/set_memory.h> #include <asm/smp.h> #include <drm/i915_drm.h> +#include <drm/intel-gtt.h> #include "gem/i915_gem_lmem.h" @@ -104,7 +106,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915) * Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. */ - if (!intel_vtd_active()) + if (!intel_vtd_active(i915)) return false; if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915)) @@ -116,17 +118,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915) return false; } -void i915_ggtt_suspend(struct i915_ggtt *ggtt) +/** + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM + * @vm: The VM to suspend the mappings for + * + * Suspend the memory mappings for all objects mapped to HW via the GGTT or a + * DPT page table. + */ +void i915_ggtt_suspend_vm(struct i915_address_space *vm) { struct i915_vma *vma, *vn; int open; - mutex_lock(&ggtt->vm.mutex); + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); + + mutex_lock(&vm->mutex); /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&ggtt->vm.open, 0); + open = atomic_xchg(&vm->open, 0); - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); i915_vma_wait_for_bind(vma); @@ -139,11 +150,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt) } } - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - ggtt->invalidate(ggtt); - atomic_set(&ggtt->vm.open, open); + vm->clear_range(vm, 0, vm->total); - mutex_unlock(&ggtt->vm.mutex); + atomic_set(&vm->open, open); + + mutex_unlock(&vm->mutex); +} + +void i915_ggtt_suspend(struct i915_ggtt *ggtt) +{ + i915_ggtt_suspend_vm(&ggtt->vm); + ggtt->invalidate(ggtt); intel_gt_check_and_clear_faults(ggtt->vm.gt); } @@ -192,7 +209,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT; if (flags & PTE_LM) pte |= GEN12_GGTT_PTE_LM; @@ -644,7 +661,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) struct i915_ppgtt *ppgtt; int err; - ppgtt = i915_ppgtt_create(ggtt->vm.gt); + ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -727,7 +744,6 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) atomic_set(&ggtt->vm.open, 0); - rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ flush_workqueue(ggtt->vm.i915->wq); mutex_lock(&ggtt->vm.mutex); @@ -814,6 +830,21 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) return 0; } +static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915) +{ + /* + * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset + * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset + */ + GEM_BUG_ON(GRAPHICS_VER(i915) < 6); + return (GRAPHICS_VER(i915) < 8) ? 
SZ_4M : SZ_16M; +} + +static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915) +{ + return gen6_gttmmadr_size(i915) / 2; +} + static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { struct drm_i915_private *i915 = ggtt->vm.i915; @@ -822,8 +853,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) u32 pte_flags; int ret; - /* For Modern GENs the PTEs and register space are split in the BAR */ - phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; + GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915)); + phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915); /* * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range @@ -910,6 +941,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) size = gen8_get_total_gtt_size(snb_gmch_ctl); ggtt->vm.alloc_pt_dma = alloc_pt_dma; + ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; ggtt->vm.cleanup = gen6_gmch_remove; @@ -1201,7 +1233,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) if (ret) return ret; - if (intel_vtd_active()) + if (intel_vtd_active(i915)) drm_info(&i915->drm, "VT-d active for gfx access\n"); return 0; @@ -1238,37 +1270,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) ggtt->invalidate(ggtt); } -void i915_ggtt_resume(struct i915_ggtt *ggtt) +/** + * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM + * @vm: The VM to restore the mappings for + * + * Restore the memory mappings for all objects mapped to HW via the GGTT or a + * DPT page table. + * + * Returns %true if restoring the mapping for any object that was in a write + * domain before suspend. + */ +bool i915_ggtt_resume_vm(struct i915_address_space *vm) { struct i915_vma *vma; - bool flush = false; + bool write_domain_objs = false; int open; - intel_gt_check_and_clear_faults(ggtt->vm.gt); + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); /* First fill our portion of the GTT with scratch pages */ - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); + vm->clear_range(vm, 0, vm->total); /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&ggtt->vm.open, 0); + open = atomic_xchg(&vm->open, 0); /* clflush objects bound into the GGTT and rebind them. */ - list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry(vma, &vm->bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; unsigned int was_bound = atomic_read(&vma->flags) & I915_VMA_BIND_MASK; GEM_BUG_ON(!was_bound); - vma->ops->bind_vma(&ggtt->vm, NULL, vma, + vma->ops->bind_vma(vm, NULL, vma, obj ? 
obj->cache_level : 0, was_bound); if (obj) { /* only used during resume => exclusive access */ - flush |= fetch_and_zero(&obj->write_domain); + write_domain_objs |= fetch_and_zero(&obj->write_domain); obj->read_domains |= I915_GEM_DOMAIN_GTT; } } - atomic_set(&ggtt->vm.open, open); + atomic_set(&vm->open, open); + + return write_domain_objs; +} + +void i915_ggtt_resume(struct i915_ggtt *ggtt) +{ + bool flush; + + intel_gt_check_and_clear_faults(ggtt->vm.gt); + + flush = i915_ggtt_resume_vm(&ggtt->vm); + ggtt->invalidate(ggtt); if (flush) @@ -1373,13 +1427,40 @@ err_st_alloc: } static struct scatterlist * -remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, - unsigned int width, unsigned int height, - unsigned int src_stride, unsigned int dst_stride, - struct sg_table *st, struct scatterlist *sg) +add_padding_pages(unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + + return sg; +} + +static struct scatterlist * +remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int offset, unsigned int alignment_pad, + unsigned int width, unsigned int height, + unsigned int src_stride, unsigned int dst_stride, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) { unsigned int row; + if (!width || !height) + return sg; + + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); + for (row = 0; row < height; row++) { unsigned int left = width * I915_GTT_PAGE_SIZE; @@ -1415,18 +1496,98 @@ remap_pages(struct drm_i915_gem_object *obj, unsigned int offset, if (!left) continue; + sg = add_padding_pages(left >> PAGE_SHIFT, st, sg); + } + + *gtt_offset += alignment_pad + dst_stride * height; + + return sg; +} + +static struct scatterlist * +remap_contiguous_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, + unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + struct scatterlist *iter; + unsigned int offset; + + iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset); + GEM_BUG_ON(!iter); + + do { + unsigned int len; + + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) + return sg; - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a conenience to indicate how many padding PTEs - * to insert at this spot. 
- */ - sg_set_page(sg, NULL, left, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = left; - sg = sg_next(sg); - } + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); +} + +static struct scatterlist * +remap_linear_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, unsigned int alignment_pad, + unsigned int size, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + if (!size) + return sg; + + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); + + sg = remap_contiguous_pages(obj, obj_offset, size, st, sg); + sg = sg_next(sg); + + *gtt_offset += alignment_pad + size; + + return sg; +} + +static struct scatterlist * +remap_color_plane_pages(const struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj, + int color_plane, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + unsigned int alignment_pad = 0; + + if (rem_info->plane_alignment) + alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; + + if (rem_info->plane[color_plane].linear) + sg = remap_linear_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].size, + st, sg, + gtt_offset); + + else + sg = remap_tiled_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].width, + rem_info->plane[color_plane].height, + rem_info->plane[color_plane].src_stride, + rem_info->plane[color_plane].dst_stride, + st, sg, + gtt_offset); return sg; } @@ -1439,6 +1600,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info, struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *st; struct scatterlist *sg; + unsigned int gtt_offset = 0; int ret = -ENOMEM; int i; @@ -1454,12 +1616,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info, st->nents = 0; sg = st->sgl; - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { - sg = remap_pages(obj, rem_info->plane[i].offset, - rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, - st, sg); - } + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) + sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset); i915_sg_trim(st); @@ -1481,9 +1639,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, struct drm_i915_gem_object *obj) { struct sg_table *st; - struct scatterlist *sg, *iter; + struct scatterlist *sg; unsigned int count = view->partial.size; - unsigned int offset; int ret = -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -1494,34 +1651,14 @@ intel_partial_pages(const struct i915_ggtt_view *view, if (ret) goto err_sg_alloc; - iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); - GEM_BUG_ON(!iter); - - sg = st->sgl; st->nents = 0; - do { - unsigned int len; - len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; + sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) { - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. */
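The remap helpers above split the old remap_pages() into tiled, linear and contiguous variants, and add_padding_pages() now encodes alignment padding as scatterlist entries with a NULL page and a zero DMA address; as the comment notes, the display engine ignores the PTEs for padding tiles, so only the entry length matters. A sketch of how a consumer of such a table could tell the two kinds of entry apart (emit_ptes()/emit_padding() are hypothetical callbacks, not driver functions):

/*
 * Illustrative walker for an sg_table produced by the helpers above:
 * entries with a zero DMA address are padding markers whose length
 * says how many padding PTEs to emit; real entries carry DMA pages.
 */
#include <linux/scatterlist.h>

void emit_ptes(dma_addr_t addr, unsigned int n_ptes);	/* hypothetical */
void emit_padding(unsigned int n_ptes);			/* hypothetical */

static void walk_remapped_sgt(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = sg_next(sg)) {
		unsigned int n_ptes = sg_dma_len(sg) >> PAGE_SHIFT;

		if (!sg_dma_address(sg))
			emit_padding(n_ptes);
		else
			emit_ptes(sg_dma_address(sg), n_ptes);
	}
}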
- return st; - } - - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); + return st; err_sg_alloc: kfree(st); diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 1c3af0fc0456..f8253012d166 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -28,10 +28,13 @@ #define INSTR_26_TO_24_MASK 0x7000000 #define INSTR_26_TO_24_SHIFT 24 +#define __INSTR(client) ((client) << INSTR_CLIENT_SHIFT) + /* * Memory interface instructions used by the kernel */ -#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) +#define MI_INSTR(opcode, flags) \ + (__INSTR(INSTR_MI_CLIENT) | (opcode) << 23 | (flags)) /* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */ #define MI_GLOBAL_GTT (1<<22) @@ -57,6 +60,7 @@ #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) #define MI_SUSPEND_FLUSH_EN (1<<0) #define MI_SET_APPID MI_INSTR(0x0e, 0) +#define MI_SET_APPID_SESSION_ID(x) ((x) << 0) #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) #define MI_OVERLAY_CONTINUE (0x0<<21) #define MI_OVERLAY_ON (0x1<<21) @@ -146,6 +150,7 @@ #define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2) #define MI_SRM_LRM_GLOBAL_GTT (1<<22) #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ +#define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22) #define MI_FLUSH_DW_STORE_INDEX (1<<21) #define MI_INVALIDATE_TLB (1<<18) #define MI_FLUSH_DW_OP_STOREDW (1<<14) @@ -273,6 +278,19 @@ #define MI_MATH_REG_CF 0x33 /* + * Media instructions used by the kernel + */ +#define MEDIA_INSTR(pipe, op, sub_op, flags) \ + (__INSTR(INSTR_RC_CLIENT) | (pipe) << INSTR_SUBCLIENT_SHIFT | \ + (op) << INSTR_26_TO_24_SHIFT | (sub_op) << 16 | (flags)) + +#define MFX_WAIT MEDIA_INSTR(1, 0, 0, 0) +#define MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG REG_BIT(8) +#define MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG REG_BIT(9) + +#define CRYPTO_KEY_EXCHANGE MEDIA_INSTR(2, 6, 9, 0) + +/* * Commands used only by the command parser */ #define MI_SET_PREDICATE MI_INSTR(0x01, 0) @@ -328,8 +346,6 @@ #define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \ ((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16)) -#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16)) - #define COLOR_BLT ((0x2<<29)|(0x40<<22)) #define SRC_COPY_BLT ((0x2<<29)|(0x43<<22)) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 62d40c986642..f2422d48be32 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -3,7 +3,9 @@ * Copyright © 2019 Intel Corporation */ -#include "debugfs_gt.h" +#include <drm/intel-gtt.h> + +#include "intel_gt_debugfs.h" #include "gem/i915_gem_lmem.h" #include "i915_drv.h" @@ -15,12 +17,13 @@ #include "intel_gt_requests.h" #include "intel_migrate.h" #include "intel_mocs.h" +#include "intel_pm.h" #include "intel_rc6.h" #include "intel_renderstate.h" #include "intel_rps.h" #include "intel_uncore.h" -#include "intel_pm.h" #include "shmem_utils.h" +#include "pxp/intel_pxp.h" void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { @@ -434,7 +437,7 @@ void intel_gt_driver_register(struct intel_gt *gt) { intel_rps_driver_register(&gt->rps); - debugfs_gt_register(gt); + intel_gt_debugfs_register(gt); } static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) @@ -481,7 +484,7 @@ static void intel_gt_fini_scratch(struct intel_gt *gt) static struct i915_address_space *kernel_vm(struct intel_gt *gt) { if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING) - return &i915_ppgtt_create(gt)->vm; + return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm; else return i915_vm_get(&gt->ggtt->vm); }
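The intel_gpu_commands.h hunk above stops hand-coding full command headers: __INSTR() places the client ID and MEDIA_INSTR() composes pipeline, opcode and sub-opcode fields, so the old literal MFX_WAIT definition can be deleted. Assuming the header's surrounding constants (INSTR_CLIENT_SHIFT = 29, INSTR_RC_CLIENT = 0x3, INSTR_SUBCLIENT_SHIFT = 27; not all are visible in this hunk), a quick compile-time check confirms the refactor keeps the same encoding:

/*
 * Standalone sketch checking that MEDIA_INSTR(1, 0, 0, 0) equals the
 * removed literal MFX_WAIT value. The INSTR_* constants are assumed
 * from intel_gpu_commands.h; this is not driver code.
 */
#define INSTR_CLIENT_SHIFT	29
#define INSTR_RC_CLIENT		0x3u
#define INSTR_SUBCLIENT_SHIFT	27
#define INSTR_26_TO_24_SHIFT	24

#define __INSTR(client) ((client) << INSTR_CLIENT_SHIFT)
#define MEDIA_INSTR(pipe, op, sub_op, flags) \
	(__INSTR(INSTR_RC_CLIENT) | (pipe) << INSTR_SUBCLIENT_SHIFT | \
	 (op) << INSTR_26_TO_24_SHIFT | (sub_op) << 16 | (flags))

/* Old definition: (0x3<<29)|(0x1<<27)|(0x0<<16) == 0x68000000 */
_Static_assert(MEDIA_INSTR(1, 0, 0, 0) ==
	       ((0x3u << 29) | (0x1u << 27) | (0x0u << 16)),
	       "MFX_WAIT encoding changed by the refactor");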
@@ -660,6 +663,8 @@ int intel_gt_init(struct intel_gt *gt) if (err) return err; + intel_gt_init_workarounds(gt); + /* * This is just a security blanket to placate dragons. * On some systems, we very sporadically observe that the first TLBs @@ -682,6 +687,8 @@ int intel_gt_init(struct intel_gt *gt) goto err_pm; } + intel_set_mocs_index(gt); + err = intel_engines_init(gt); if (err) goto err_engines; @@ -710,6 +717,8 @@ int intel_gt_init(struct intel_gt *gt) intel_migrate_init(&gt->migrate, gt); + intel_pxp_init(&gt->pxp); + goto out_fw; err_gt: __intel_gt_disable(gt); @@ -737,6 +746,8 @@ void intel_gt_driver_remove(struct intel_gt *gt) intel_uc_driver_remove(&gt->uc); intel_engines_release(gt); + + intel_gt_flush_buffer_pool(gt); } void intel_gt_driver_unregister(struct intel_gt *gt) @@ -745,12 +756,14 @@ void intel_gt_driver_unregister(struct intel_gt *gt) intel_rps_driver_unregister(&gt->rps); + intel_pxp_fini(&gt->pxp); + /* * Upon unregistering the device to prevent any new users, cancel * all in-flight requests so that we can quickly unbind the active * resources. */ - intel_gt_set_wedged(gt); + intel_gt_set_wedged_on_fini(gt); /* Scrub all HW state upon release */ with_intel_runtime_pm(gt->uncore->rpm, wakeref) @@ -765,6 +778,7 @@ void intel_gt_driver_release(struct intel_gt *gt) if (vm) /* FIXME being called twice on error paths :( */ i915_vm_put(vm); + intel_wa_list_free(&gt->wa_list); intel_gt_pm_fini(gt); intel_gt_fini_scratch(gt); intel_gt_fini_buffer_pool(gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c index aa0a59c5b614..acc49c56a9f3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c @@ -245,8 +245,6 @@ void intel_gt_fini_buffer_pool(struct intel_gt *gt) struct intel_gt_buffer_pool *pool = &gt->buffer_pool; int n; - intel_gt_flush_buffer_pool(gt); - for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) GEM_BUG_ON(!list_empty(&pool->cache_list[n])); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c new file mode 100644 index 000000000000..f103664b71d4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/debugfs.h> + +#include "i915_drv.h" +#include "intel_gt_debugfs.h" +#include "intel_gt_engines_debugfs.h" +#include "intel_gt_pm_debugfs.h" +#include "intel_sseu_debugfs.h" +#include "pxp/intel_pxp_debugfs.h" +#include "uc/intel_uc_debugfs.h" + +int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val) +{ + int ret = intel_gt_terminally_wedged(gt); + + switch (ret) { + case -EIO: + *val = 1; + return 0; + case 0: + *val = 0; + return 0; + default: + return ret; + } +} + +int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val) +{ + /* Flush any previous reset before applying for a new one */ + wait_event(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); + + intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE, + "Manually reset engine mask to %llx", val); + return 0; +} + +/* + * keep the interface clean where the first parameter + * is a 'struct intel_gt *' instead of 'void *' + */ +static int __intel_gt_debugfs_reset_show(void *data, u64 *val) +{ + return intel_gt_debugfs_reset_show(data, val); +} + +static int __intel_gt_debugfs_reset_store(void *data, u64 val) +{ + return intel_gt_debugfs_reset_store(data, val); +}
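The "keep the interface clean" comment above documents a common debugfs idiom: DEFINE_SIMPLE_ATTRIBUTE() only accepts void * accessors, so the typed intel_gt_debugfs_reset_show/store() stay available to other callers while tiny private adapters do the cast. The same shape in isolation, with made-up names (struct foo, foo_value_*):

/*
 * Sketch of the typed-accessor-plus-adapter pattern used above.
 * All names here are illustrative.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>

struct foo { u64 value; };

static int foo_value_show(struct foo *foo, u64 *val)
{
	*val = foo->value;
	return 0;
}

static int foo_value_store(struct foo *foo, u64 val)
{
	foo->value = val;
	return 0;
}

/* Adapters: debugfs hands back the void *data registered with the file. */
static int __foo_value_show(void *data, u64 *val)
{
	return foo_value_show(data, val);
}

static int __foo_value_store(void *data, u64 val)
{
	return foo_value_store(data, val);
}

DEFINE_SIMPLE_ATTRIBUTE(foo_value_fops, __foo_value_show,
			__foo_value_store, "%llu\n");

A caller would then register it with something like debugfs_create_file("value", 0644, root, foo, &foo_value_fops).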
+ +DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show, + __intel_gt_debugfs_reset_store, "%llu\n"); + +static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root) +{ + static const struct intel_gt_debugfs_file files[] = { + { "reset", &reset_fops, NULL }, + }; + + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt); +} + +void intel_gt_debugfs_register(struct intel_gt *gt) +{ + struct dentry *root; + + if (!gt->i915->drm.primary->debugfs_root) + return; + + root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root); + if (IS_ERR(root)) + return; + + gt_debugfs_register(gt, root); + + intel_gt_engines_debugfs_register(gt, root); + intel_gt_pm_debugfs_register(gt, root); + intel_sseu_debugfs_register(gt, root); + + intel_uc_debugfs_register(&gt->uc, root); + intel_pxp_debugfs_register(&gt->pxp, root); +} + +void intel_gt_debugfs_register_files(struct dentry *root, + const struct intel_gt_debugfs_file *files, + unsigned long count, void *data) +{ + while (count--) { + umode_t mode = files->fops->write ? 0644 : 0444; + + if (!files->eval || files->eval(data)) + debugfs_create_file(files->name, + mode, root, data, + files->fops); + + files++; + } +} diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h index f77540f727e9..e307ceb99031 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h @@ -3,14 +3,14 @@ * Copyright © 2019 Intel Corporation */ -#ifndef DEBUGFS_GT_H -#define DEBUGFS_GT_H +#ifndef INTEL_GT_DEBUGFS_H +#define INTEL_GT_DEBUGFS_H #include <linux/file.h> struct intel_gt; -#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name) \ +#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ return single_open(file, __name ## _show, inode->i_private); \ @@ -23,16 +23,20 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } -void debugfs_gt_register(struct intel_gt *gt); +void intel_gt_debugfs_register(struct intel_gt *gt); -struct debugfs_gt_file { +struct intel_gt_debugfs_file { const char *name; const struct file_operations *fops; bool (*eval)(void *data); }; void intel_gt_debugfs_register_files(struct dentry *root, - const struct debugfs_gt_file *files, + const struct intel_gt_debugfs_file *files, unsigned long count, void *data); -#endif /* DEBUGFS_GT_H */ +/* functions that need to be accessed by the upper level non-gt interfaces */ +int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val); +int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val); + +#endif /* INTEL_GT_DEBUGFS_H */
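intel_gt_debugfs_register_files() above is table-driven: it derives the file mode from the fops (0644 when a write handler exists, 0444 otherwise) and skips any entry whose optional eval() predicate rejects the device. A sketch of a caller-side table with such a predicate (always_fops and llc_fops stand in for real fops; an llc_eval of exactly this shape appears verbatim later in this diff):

/*
 * Illustrative use of the intel_gt_debugfs_file table. The fops
 * symbols are placeholders; the eval() hook gates registration on a
 * hardware capability.
 */
extern const struct file_operations always_fops;	/* placeholder */
extern const struct file_operations llc_fops;		/* placeholder */

static bool llc_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_LLC(gt->i915);
}

static void my_gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
	static const struct intel_gt_debugfs_file files[] = {
		{ "always_there", &always_fops, NULL },
		{ "llc", &llc_fops, llc_eval },
	};

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}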
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c index 5e3725e62241..8f9b874fdc9c 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_engines.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c @@ -6,10 +6,10 @@ #include <drm/drm_print.h> -#include "debugfs_engines.h" -#include "debugfs_gt.h" #include "i915_drv.h" /* for_each_engine! */ #include "intel_engine.h" +#include "intel_gt_debugfs.h" +#include "intel_gt_engines_debugfs.h" static int engines_show(struct seq_file *m, void *data) { @@ -24,11 +24,11 @@ static int engines_show(struct seq_file *m, void *data) return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(engines); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(engines); -void debugfs_engines_register(struct intel_gt *gt, struct dentry *root) +void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root) { - static const struct debugfs_gt_file files[] = { + static const struct intel_gt_debugfs_file files[] = { { "engines", &engines_fops }, }; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h new file mode 100644 index 000000000000..dda113452da9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_GT_ENGINES_DEBUGFS_H +#define INTEL_GT_ENGINES_DEBUGFS_H + +struct intel_gt; +struct dentry; + +void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root); + +#endif /* INTEL_GT_ENGINES_DEBUGFS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index b2de83be4d97..699a74582d32 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -13,6 +13,7 @@ #include "intel_lrc_reg.h" #include "intel_uncore.h" #include "intel_rps.h" +#include "pxp/intel_pxp_irq.h" static void guc_irq_handler(struct intel_guc *guc, u16 iir) { @@ -64,6 +65,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, if (instance == OTHER_GTPM_INSTANCE) return gen11_rps_irq_handler(&gt->rps, iir); + if (instance == OTHER_KCR_INSTANCE) + return intel_pxp_irq_handler(&gt->pxp, iir); + WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", instance, iir); } @@ -196,6 +200,9 @@ void gen11_gt_irq_reset(struct intel_gt *gt) intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); + + intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~0); } void gen11_gt_irq_postinstall(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index dea8e2479897..c0fa41e4c803 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -18,6 +18,9 @@ #include "intel_rc6.h" #include "intel_rps.h" #include "intel_wakeref.h" +#include "pxp/intel_pxp_pm.h" + +#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2) static void user_forcewake(struct intel_gt *gt, bool suspend) { @@ -83,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf) intel_rc6_unpark(&gt->rc6); intel_rps_unpark(&gt->rps); i915_pmu_gt_unparked(i915); + intel_guc_busyness_unpark(gt); intel_gt_unpark_requests(gt); runtime_begin(gt); @@ -101,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf) runtime_end(gt); intel_gt_park_requests(gt); + intel_guc_busyness_park(gt); i915_vma_parked(gt); i915_pmu_gt_parked(i915); intel_rps_park(&gt->rps); @@ -262,6 +267,8 @@ int intel_gt_resume(struct intel_gt *gt) intel_uc_resume(&gt->uc); + intel_pxp_resume(&gt->pxp); + user_forcewake(gt, false); out_fw: @@ -279,7 +286,7 @@ static void wait_for_suspend(struct intel_gt *gt) if (!intel_gt_pm_is_awake(gt)) return; - if 
(intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { + if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) { /* * Forcibly cancel outstanding work and leave * the gpu quiet. @@ -296,7 +303,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt) user_forcewake(gt, true); wait_for_suspend(gt); - intel_uc_suspend(&gt->uc); + intel_pxp_suspend_prepare(&gt->pxp); } static suspend_state_t pm_suspend_target(void) @@ -320,6 +327,9 @@ void intel_gt_suspend_late(struct intel_gt *gt) GEM_BUG_ON(gt->awake); + intel_uc_suspend(&gt->uc); + intel_pxp_suspend(&gt->pxp); + /* * On disabling the device, we want to turn off HW access to memory * that we no longer own. @@ -346,6 +356,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) void intel_gt_runtime_suspend(struct intel_gt *gt) { + intel_pxp_runtime_suspend(&gt->pxp); intel_uc_runtime_suspend(&gt->uc); GT_TRACE(gt, "\n"); @@ -353,11 +364,19 @@ void intel_gt_runtime_suspend(struct intel_gt *gt) int intel_gt_runtime_resume(struct intel_gt *gt) { + int ret; + GT_TRACE(gt, "\n"); intel_gt_init_swizzling(gt); intel_ggtt_restore_fences(gt->ggtt); - return intel_uc_runtime_resume(&gt->uc); + ret = intel_uc_runtime_resume(&gt->uc); + if (ret) + return ret; + + intel_pxp_runtime_resume(&gt->pxp); + + return 0; } static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index d0588d8aaa44..bc898df7a48c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -31,6 +31,11 @@ static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt) return intel_wakeref_get_if_active(&gt->wakeref); } +static inline void intel_gt_pm_might_get(struct intel_gt *gt) +{ + intel_wakeref_might_get(&gt->wakeref); +} + static inline void intel_gt_pm_put(struct intel_gt *gt) { intel_wakeref_put(&gt->wakeref); @@ -41,6 +46,15 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt) intel_wakeref_put_async(&gt->wakeref); } +static inline void intel_gt_pm_might_put(struct intel_gt *gt) +{ + intel_wakeref_might_put(&gt->wakeref); +} + +#define with_intel_gt_pm(gt, tmp) \ + for (tmp = 1, intel_gt_pm_get(gt); tmp; \ + intel_gt_pm_put(gt), tmp = 0) + static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) { return intel_wakeref_wait_for_idle(&gt->wakeref); } diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index d6f5836396f8..404dfa7673c6 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -6,18 +6,59 @@ #include <linux/seq_file.h> -#include "debugfs_gt.h" -#include "debugfs_gt_pm.h" #include "i915_drv.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" +#include "intel_gt_debugfs.h" #include "intel_gt_pm.h" +#include "intel_gt_pm_debugfs.h" #include "intel_llc.h" +#include "intel_pcode.h" #include "intel_rc6.h" #include "intel_rps.h" #include "intel_runtime_pm.h" -#include "intel_sideband.h" #include "intel_uncore.h" +#include "vlv_sideband.h" + +int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt) +{ + atomic_inc(&gt->user_wakeref); + intel_gt_pm_get(gt); + if (GRAPHICS_VER(gt->i915) >= 6) + intel_uncore_forcewake_user_get(gt->uncore); + + return 0; +} + +int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt) +{ + if (GRAPHICS_VER(gt->i915) >= 6) + intel_uncore_forcewake_user_put(gt->uncore); + intel_gt_pm_put(gt); + atomic_dec(&gt->user_wakeref); + + return 0; +}
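with_intel_gt_pm(), added to intel_gt_pm.h above, is the usual for-loop scoping trick: the body executes exactly once between intel_gt_pm_get() and intel_gt_pm_put(). Because the put sits in the loop's advance expression, leaving the body early with break or return would skip it, so the body must run to completion. A hypothetical use:

/*
 * Usage sketch for with_intel_gt_pm(); read_reg_with_pm() is an
 * illustrative function, not part of the driver.
 */
static u32 read_reg_with_pm(struct intel_gt *gt, i915_reg_t reg)
{
	u32 val = 0;
	int tmp;

	with_intel_gt_pm(gt, tmp)
		val = intel_uncore_read(gt->uncore, reg);

	return val;
}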
+ +static int forcewake_user_open(struct inode *inode, struct file *file) +{ + struct intel_gt *gt = inode->i_private; + + return intel_gt_pm_debugfs_forcewake_user_open(gt); +} + +static int forcewake_user_release(struct inode *inode, struct file *file) +{ + struct intel_gt *gt = inode->i_private; + + return intel_gt_pm_debugfs_forcewake_user_release(gt); +} + +static const struct file_operations forcewake_user_fops = { + .owner = THIS_MODULE, + .open = forcewake_user_open, + .release = forcewake_user_release, +}; static int fw_domains_show(struct seq_file *m, void *data) { @@ -36,7 +77,7 @@ static int fw_domains_show(struct seq_file *m, void *data) return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains); static void print_rc6_res(struct seq_file *m, const char *title, @@ -238,11 +279,10 @@ static int drpc_show(struct seq_file *m, void *unused) return err; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc); -static int frequency_show(struct seq_file *m, void *unused) +void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) { - struct intel_gt *gt = m->private; struct drm_i915_private *i915 = gt->i915; struct intel_uncore *uncore = gt->uncore; struct intel_rps *rps = &gt->rps; @@ -254,21 +294,21 @@ static int frequency_show(struct seq_file *m, void *unused) u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK); - seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); - seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); - seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> + drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); + drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f); + drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> MEMSTAT_VID_SHIFT); - seq_printf(m, "Current P-state: %d\n", + drm_printf(p, "Current P-state: %d\n", (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { u32 rpmodectl, freq_sts; rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); - seq_printf(m, "Video Turbo Mode: %s\n", + drm_printf(p, "Video Turbo Mode: %s\n", yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); - seq_printf(m, "HW control enabled: %s\n", + drm_printf(p, "HW control enabled: %s\n", yesno(rpmodectl & GEN6_RP_ENABLE)); - seq_printf(m, "SW control enabled: %s\n", + drm_printf(p, "SW control enabled: %s\n", yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); @@ -276,25 +316,25 @@ static int frequency_show(struct seq_file *m, void *unused) freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); vlv_punit_put(i915); - seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); - seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq); + drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); + drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq); - seq_printf(m, "actual GPU freq: %d MHz\n", + drm_printf(p, "actual GPU freq: %d MHz\n", intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); - seq_printf(m, "current GPU freq: %d MHz\n", + drm_printf(p, "current GPU freq: %d MHz\n", intel_gpu_freq(rps, rps->cur_freq)); - seq_printf(m, "max GPU freq: %d MHz\n", + drm_printf(p, "max GPU freq: %d MHz\n", intel_gpu_freq(rps, rps->max_freq)); - seq_printf(m, "min GPU freq: %d MHz\n", + drm_printf(p, "min GPU freq: %d MHz\n", intel_gpu_freq(rps, rps->min_freq)); - seq_printf(m, "idle GPU freq: %d MHz\n", 
intel_gpu_freq(rps, rps->idle_freq)); - seq_printf(m, "efficient (RPe) frequency: %d MHz\n", + drm_printf(p, "efficient (RPe) frequency: %d MHz\n", intel_gpu_freq(rps, rps->efficient_freq)); } else if (GRAPHICS_VER(i915) >= 6) { u32 rp_state_limits; @@ -309,13 +349,11 @@ static int frequency_show(struct seq_file *m, void *unused) int max_freq; rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS); - if (IS_GEN9_LP(i915)) { - rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); + rp_state_cap = intel_rps_read_state_cap(rps); + if (IS_GEN9_LP(i915)) gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS); - } else { - rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); + else gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS); - } /* RPSTAT1 is in the GT power well */ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); @@ -376,113 +414,121 @@ static int frequency_show(struct seq_file *m, void *unused) } pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); - seq_printf(m, "Video Turbo Mode: %s\n", + drm_printf(p, "Video Turbo Mode: %s\n", yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); - seq_printf(m, "HW control enabled: %s\n", + drm_printf(p, "HW control enabled: %s\n", yesno(rpmodectl & GEN6_RP_ENABLE)); - seq_printf(m, "SW control enabled: %s\n", + drm_printf(p, "SW control enabled: %s\n", yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); - seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", + drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", pm_ier, pm_imr, pm_mask); if (GRAPHICS_VER(i915) <= 10) - seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n", + drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n", pm_isr, pm_iir); - seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", + drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n", rps->pm_intrmsk_mbz); - seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); - seq_printf(m, "Render p-state ratio: %d\n", + drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); + drm_printf(p, "Render p-state ratio: %d\n", (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 
0x1ff00 : 0xff00)) >> 8); - seq_printf(m, "Render p-state VID: %d\n", + drm_printf(p, "Render p-state VID: %d\n", gt_perf_status & 0xff); - seq_printf(m, "Render p-state limit: %d\n", + drm_printf(p, "Render p-state limit: %d\n", rp_state_limits & 0xff); - seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); - seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); - seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); - seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); - seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); - seq_printf(m, "CAGF: %dMHz\n", cagf); - seq_printf(m, "RP CUR UP EI: %d (%lldns)\n", + drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat); + drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl); + drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit); + drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit); + drm_printf(p, "RPNSWREQ: %dMHz\n", reqf); + drm_printf(p, "CAGF: %dMHz\n", cagf); + drm_printf(p, "RP CUR UP EI: %d (%lldns)\n", rpcurupei, intel_gt_pm_interval_to_ns(gt, rpcurupei)); - seq_printf(m, "RP CUR UP: %d (%lldns)\n", + drm_printf(p, "RP CUR UP: %d (%lldns)\n", rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup)); - seq_printf(m, "RP PREV UP: %d (%lldns)\n", + drm_printf(p, "RP PREV UP: %d (%lldns)\n", rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup)); - seq_printf(m, "Up threshold: %d%%\n", + drm_printf(p, "Up threshold: %d%%\n", rps->power.up_threshold); - seq_printf(m, "RP UP EI: %d (%lldns)\n", + drm_printf(p, "RP UP EI: %d (%lldns)\n", rpupei, intel_gt_pm_interval_to_ns(gt, rpupei)); - seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n", + drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n", rpupt, intel_gt_pm_interval_to_ns(gt, rpupt)); - seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n", + drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n", rpcurdownei, intel_gt_pm_interval_to_ns(gt, rpcurdownei)); - seq_printf(m, "RP CUR DOWN: %d (%lldns)\n", + drm_printf(p, "RP CUR DOWN: %d (%lldns)\n", rpcurdown, intel_gt_pm_interval_to_ns(gt, rpcurdown)); - seq_printf(m, "RP PREV DOWN: %d (%lldns)\n", + drm_printf(p, "RP PREV DOWN: %d (%lldns)\n", rpprevdown, intel_gt_pm_interval_to_ns(gt, rpprevdown)); - seq_printf(m, "Down threshold: %d%%\n", + drm_printf(p, "Down threshold: %d%%\n", rps->power.down_threshold); - seq_printf(m, "RP DOWN EI: %d (%lldns)\n", + drm_printf(p, "RP DOWN EI: %d (%lldns)\n", rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei)); - seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n", + drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n", rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt)); max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff; max_freq *= (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1); - seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", + drm_printf(p, "Lowest (RPN) frequency: %dMHz\n", intel_gpu_freq(rps, max_freq)); max_freq = (rp_state_cap & 0xff00) >> 8; max_freq *= (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1); - seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", + drm_printf(p, "Nominal (RP1) frequency: %dMHz\n", intel_gpu_freq(rps, max_freq)); max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff; max_freq *= (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11 ? 
GEN9_FREQ_SCALER : 1); - seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", + drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", intel_gpu_freq(rps, max_freq)); - seq_printf(m, "Max overclocked frequency: %dMHz\n", + drm_printf(p, "Max overclocked frequency: %dMHz\n", intel_gpu_freq(rps, rps->max_freq)); - seq_printf(m, "Current freq: %d MHz\n", + drm_printf(p, "Current freq: %d MHz\n", intel_gpu_freq(rps, rps->cur_freq)); - seq_printf(m, "Actual freq: %d MHz\n", cagf); - seq_printf(m, "Idle freq: %d MHz\n", + drm_printf(p, "Actual freq: %d MHz\n", cagf); + drm_printf(p, "Idle freq: %d MHz\n", intel_gpu_freq(rps, rps->idle_freq)); - seq_printf(m, "Min freq: %d MHz\n", + drm_printf(p, "Min freq: %d MHz\n", intel_gpu_freq(rps, rps->min_freq)); - seq_printf(m, "Boost freq: %d MHz\n", + drm_printf(p, "Boost freq: %d MHz\n", intel_gpu_freq(rps, rps->boost_freq)); - seq_printf(m, "Max freq: %d MHz\n", + drm_printf(p, "Max freq: %d MHz\n", intel_gpu_freq(rps, rps->max_freq)); - seq_printf(m, + drm_printf(p, "efficient (RPe) frequency: %d MHz\n", intel_gpu_freq(rps, rps->efficient_freq)); } else { - seq_puts(m, "no P-state info available\n"); + drm_puts(p, "no P-state info available\n"); } - seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk); - seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq); - seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); + drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk); + drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq); + drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); intel_runtime_pm_put(uncore->rpm, wakeref); +} + +static int frequency_show(struct seq_file *m, void *unused) +{ + struct intel_gt *gt = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + intel_gt_pm_frequency_dump(gt, &p); return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency); static int llc_show(struct seq_file *m, void *data) { @@ -535,7 +581,7 @@ static bool llc_eval(void *data) return HAS_LLC(gt->i915); } -DEFINE_GT_DEBUGFS_ATTRIBUTE(llc); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc); static const char *rps_power_to_str(unsigned int power) { @@ -614,14 +660,15 @@ static bool rps_eval(void *data) return HAS_RPS(gt->i915); } -DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost); -void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root) +void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root) { - static const struct debugfs_gt_file files[] = { + static const struct intel_gt_debugfs_file files[] = { { "drpc", &drpc_fops, NULL }, { "frequency", &frequency_fops, NULL }, { "forcewake", &fw_domains_fops, NULL }, + { "forcewake_user", &forcewake_user_fops, NULL}, { "llc", &llc_fops, llc_eval }, { "rps_boost", &rps_boost_fops, rps_eval }, }; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h new file mode 100644 index 000000000000..a8457887ec65 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_GT_PM_DEBUGFS_H +#define INTEL_GT_PM_DEBUGFS_H + +struct intel_gt; +struct dentry; +struct drm_printer; + +void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root); +void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct 
drm_printer *m); + +/* functions that need to be accessed by the upper level non-gt interfaces */ +int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt); +int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt); + +#endif /* INTEL_GT_PM_DEBUGFS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index a81e21bf1bd1..14216cc471b1 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -26,6 +26,7 @@ #include "intel_rps_types.h" #include "intel_migrate_types.h" #include "intel_wakeref.h" +#include "pxp/intel_pxp_types.h" struct drm_i915_private; struct i915_ggtt; @@ -72,6 +73,8 @@ struct intel_gt { struct intel_uc uc; + struct i915_wa_list wa_list; + struct intel_gt_timelines { spinlock_t lock; /* protects active_list */ struct list_head active_list; @@ -184,6 +187,9 @@ struct intel_gt { u8 num_engines; + /* General presence of SFC units */ + u8 sfc_mask; + /* Media engine access to SFC per instance */ u8 vdbox_sfc_access; @@ -192,6 +198,12 @@ struct intel_gt { unsigned long mslice_mask; } info; + + struct { + u8 uc_index; + } mocs; + + struct intel_pxp pxp; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index e137dd32b5b8..9fee968d57db 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -6,6 +6,9 @@ #include <linux/slab.h> /* fault-inject.h is not standalone! */ #include <linux/fault-inject.h> +#include <linux/sched/mm.h> + +#include <drm/drm_cache.h> #include "gem/i915_gem_lmem.h" #include "i915_trace.h" @@ -28,7 +31,8 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz) * used the passed in size for the page size, which should ensure it * also has the same alignment. 
*/ - obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, 0); + obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, + vm->lmem_pt_obj_flags); /* * Ensure all paging structures for this vm share the same dma-resv * object underneath, with the idea that one object_lock() will lock @@ -155,7 +159,7 @@ void i915_vm_resv_release(struct kref *kref) static void __i915_vm_release(struct work_struct *work) { struct i915_address_space *vm = - container_of(work, struct i915_address_space, rcu.work); + container_of(work, struct i915_address_space, release_work); vm->cleanup(vm); i915_address_space_fini(vm); @@ -171,7 +175,7 @@ void i915_vm_release(struct kref *kref) GEM_BUG_ON(i915_is_ggtt(vm)); trace_i915_ppgtt_release(vm); - queue_rcu_work(vm->i915->wq, &vm->rcu); + queue_work(vm->i915->wq, &vm->release_work); } void i915_address_space_init(struct i915_address_space *vm, int subclass) @@ -185,7 +189,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass) if (!kref_read(&vm->resv_ref)) kref_init(&vm->resv_ref); - INIT_RCU_WORK(&vm->rcu, __i915_vm_release); + INIT_WORK(&vm->release_work, __i915_vm_release); atomic_set(&vm->open, 1); /* @@ -272,6 +276,7 @@ static void poison_scratch_page(struct drm_i915_gem_object *scratch) val = POISON_FREE; memset(vaddr, val, scratch->base.size); + drm_clflush_virt_range(vaddr, scratch->base.size); } int setup_scratch_page(struct i915_address_space *vm) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index bc7153018ebd..51afe66d00f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -135,6 +135,9 @@ typedef u64 gen8_pte_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) +#define GEN8_PAGE_PRESENT BIT_ULL(0) +#define GEN8_PAGE_RW BIT_ULL(1) + #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) @@ -213,7 +216,7 @@ struct i915_vma_ops { struct i915_address_space { struct kref ref; - struct rcu_work rcu; + struct work_struct release_work; struct drm_mm mm; struct intel_gt *gt; @@ -260,6 +263,9 @@ struct i915_address_space { u8 pd_shift; u8 scratch_order; + /* Flags used when creating page-table objects for this vm */ + unsigned long lmem_pt_obj_flags; + struct drm_i915_gem_object * (*alloc_pt_dma)(struct i915_address_space *vm, int sz); @@ -519,7 +525,8 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) return __px_dma(pt ? 
px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]); } -void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt); +void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt, + unsigned long lmem_pt_obj_flags); int i915_ggtt_probe_hw(struct drm_i915_private *i915); int i915_ggtt_init_hw(struct drm_i915_private *i915); @@ -537,8 +544,11 @@ static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) int i915_ppgtt_init_hw(struct intel_gt *gt); -struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt); +struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt, + unsigned long lmem_pt_obj_flags); +void i915_ggtt_suspend_vm(struct i915_address_space *vm); +bool i915_ggtt_resume_vm(struct i915_address_space *vm); void i915_ggtt_suspend(struct i915_ggtt *gtt); void i915_ggtt_resume(struct i915_ggtt *ggtt); diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c index eb1a15deed22..08d7d5ae263a 100644 --- a/drivers/gpu/drm/i915/gt/intel_llc.c +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -3,12 +3,13 @@ * Copyright © 2019 Intel Corporation */ +#include <asm/tsc.h> #include <linux/cpufreq.h> #include "i915_drv.h" #include "intel_gt.h" #include "intel_llc.h" -#include "intel_sideband.h" +#include "intel_pcode.h" struct ia_constants { unsigned int min_gpu_freq; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index bb4af4977920..b3489599e4de 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -226,6 +226,40 @@ static const u8 gen12_xcs_offsets[] = { END }; +static const u8 dg2_xcs_offsets[] = { + NOP(1), + LRI(15, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + REG(0x120), + REG(0x124), + + NOP(1), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + END +}; + static const u8 gen8_rcs_offsets[] = { NOP(1), LRI(14, POSTED), @@ -525,6 +559,49 @@ static const u8 xehp_rcs_offsets[] = { END }; +static const u8 dg2_rcs_offsets[] = { + NOP(1), + LRI(15, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + REG(0x120), + REG(0x124), + + NOP(1), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + LRI(3, POSTED), + REG(0x1b0), + REG16(0x5a8), + REG16(0x5ac), + + NOP(6), + LRI(1, 0), + REG(0x0c8), + + END +}; + #undef END #undef REG16 #undef REG @@ -543,7 +620,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) !intel_engine_has_relative_mmio(engine)); if (engine->class == RENDER_CLASS) { - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) + return dg2_rcs_offsets; + else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) return xehp_rcs_offsets; else if (GRAPHICS_VER(engine->i915) >= 12) return gen12_rcs_offsets; @@ -554,7 +633,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) else return gen8_rcs_offsets; } else { - if (GRAPHICS_VER(engine->i915) >= 12) + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) + return dg2_xcs_offsets; + else if 
(GRAPHICS_VER(engine->i915) >= 12) return gen12_xcs_offsets; else if (GRAPHICS_VER(engine->i915) >= 9) return gen9_xcs_offsets; @@ -861,7 +942,13 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine) context_size += PAGE_SIZE; } - obj = i915_gem_object_create_lmem(engine->i915, context_size, 0); + if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) { + ce->parallel.guc.parent_page = context_size / PAGE_SIZE; + context_size += PARENT_SCRATCH_SIZE; + } + + obj = i915_gem_object_create_lmem(engine->i915, context_size, + I915_BO_ALLOC_PM_VOLATILE); if (IS_ERR(obj)) obj = i915_gem_object_create_shmem(engine->i915, context_size); if (IS_ERR(obj)) @@ -1080,6 +1167,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) cs = gen12_emit_cmd_buf_wa(ce, cs); cs = gen12_emit_restore_scratch(ce, cs); + /* Wa_16013000631:dg2 */ + if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_G11(ce->engine->i915)) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); + return cs; } diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index 1dac21aa7e5c..19a01878fee3 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -13,7 +13,6 @@ struct insert_pte_data { u64 offset; - bool is_lmem; }; #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */ @@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm, struct insert_pte_data *d = data; vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, - d->is_lmem ? PTE_LM : 0); + i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0); d->offset += PAGE_SIZE; } @@ -78,7 +77,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) * TODO: Add support for huge LMEM PTEs */ - vm = i915_ppgtt_create(gt); + vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY); if (IS_ERR(vm)) return ERR_CAST(vm); @@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) goto err_vm; /* Now allow the GPU to rewrite the PTE via its own ppGTT */ - d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]); - vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d); + vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d); } return &vm->vm; @@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq, GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); /* Compute the page directory offset for the target address range */ - offset += (u64)rq->engine->instance << 32; offset >>= 12; offset *= sizeof(u64); offset += 2 * CHUNK_SZ; + offset += (u64)rq->engine->instance << 32; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 582c4423b95d..9c253ba593c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -22,6 +22,8 @@ struct drm_i915_mocs_table { unsigned int size; unsigned int n_entries; const struct drm_i915_mocs_entry *table; + u8 uc_index; + u8 unused_entries_index; }; /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */ @@ -40,6 +42,8 @@ struct drm_i915_mocs_table { #define L3_ESC(value) ((value) << 0) #define L3_SCC(value) ((value) << 1) #define _L3_CACHEABILITY(value) ((value) << 4) +#define L3_GLBGO(value) ((value) << 6) +#define L3_LKUP(value) ((value) << 7) /* Helper defines */ #define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. 
*/ @@ -88,18 +92,25 @@ struct drm_i915_mocs_table { * * Entries not part of the following tables are undefined as far as * userspace is concerned and shouldn't be relied upon. For Gen < 12 - * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for - * PTE and will be initialized to an invalid value. + * they will be initialized to PTE. Gen >= 12 don't have a setting for + * PTE and those platforms except TGL/RKL will be initialized L3 WB to + * catch accidental use of reserved and unused mocs indexes. * * The last few entries are reserved by the hardware. For ICL+ they * should be initialized according to bspec and never used, for older * platforms they should never be written to. * - * NOTE: These tables are part of bspec and defined as part of hardware + * NOTE1: These tables are part of bspec and defined as part of hardware * interface for ICL+. For older platforms, they are part of kernel * ABI. It is expected that, for specific hardware platform, existing * entries will remain constant and the table will only be updated by * adding new entries, filling unused positions. + * + * NOTE2: For GEN >= 12 except TGL and RKL, reserved and unspecified MOCS + * indices have been set to L3 WB. These reserved entries should never + * be used, they may be changed to low performant variants with better + * coherency in the future if more entries are needed. + * For TGL/RKL, all the unspecified MOCS indexes are mapped to L3 UC. */ #define GEN9_MOCS_ENTRIES \ MOCS_ENTRY(I915_MOCS_UNCACHED, \ @@ -282,17 +293,9 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = { }; static const struct drm_i915_mocs_entry dg1_mocs_table[] = { - /* Error */ - MOCS_ENTRY(0, 0, L3_0_DIRECT), /* UC */ MOCS_ENTRY(1, 0, L3_1_UC), - - /* Reserved */ - MOCS_ENTRY(2, 0, L3_0_DIRECT), - MOCS_ENTRY(3, 0, L3_0_DIRECT), - MOCS_ENTRY(4, 0, L3_0_DIRECT), - /* WB - L3 */ MOCS_ENTRY(5, 0, L3_3_WB), /* WB - L3 50% */ @@ -314,6 +317,83 @@ static const struct drm_i915_mocs_entry dg1_mocs_table[] = { MOCS_ENTRY(63, 0, L3_1_UC), }; +static const struct drm_i915_mocs_entry gen12_mocs_table[] = { + GEN11_MOCS_ENTRIES, + /* Implicitly enable L1 - HDC:L1 + L3 + LLC */ + MOCS_ENTRY(48, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_3_WB), + /* Implicitly enable L1 - HDC:L1 + L3 */ + MOCS_ENTRY(49, + LE_1_UC | LE_TC_1_LLC, + L3_3_WB), + /* Implicitly enable L1 - HDC:L1 + LLC */ + MOCS_ENTRY(50, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC), + /* Implicitly enable L1 - HDC:L1 */ + MOCS_ENTRY(51, + LE_1_UC | LE_TC_1_LLC, + L3_1_UC), + /* HW Special Case (CCS) */ + MOCS_ENTRY(60, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC), + /* HW Special Case (Displayable) */ + MOCS_ENTRY(61, + LE_1_UC | LE_TC_1_LLC, + L3_3_WB), +}; + +static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = { + /* wa_1608975824 */ + MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)), + + /* UC - Coherent; GO:L3 */ + MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)), + /* UC - Coherent; GO:Memory */ + MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), + /* UC - Non-Coherent; GO:Memory */ + MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)), + /* UC - Non-Coherent; GO:L3 */ + MOCS_ENTRY(4, 0, L3_1_UC), + + /* WB */ + MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)), + + /* HW Reserved - SW program but never use. 
*/ + MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)), + MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)), + MOCS_ENTRY(60, 0, L3_1_UC), + MOCS_ENTRY(61, 0, L3_1_UC), + MOCS_ENTRY(62, 0, L3_1_UC), + MOCS_ENTRY(63, 0, L3_1_UC), +}; + +static const struct drm_i915_mocs_entry dg2_mocs_table[] = { + /* UC - Coherent; GO:L3 */ + MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)), + /* UC - Coherent; GO:Memory */ + MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), + /* UC - Non-Coherent; GO:Memory */ + MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)), + + /* WB - LC */ + MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)), +}; + +static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = { + /* Wa_14011441408: Set Go to Memory for MOCS#0 */ + MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), + /* UC - Coherent; GO:Memory */ + MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), + /* UC - Non-Coherent; GO:Memory */ + MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)), + + /* WB - LC */ + MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)), +}; + enum { HAS_GLOBAL_MOCS = BIT(0), HAS_ENGINE_MOCS = BIT(1), @@ -340,14 +420,45 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, { unsigned int flags; - if (IS_DG1(i915)) { + memset(table, 0, sizeof(struct drm_i915_mocs_table)); + + table->unused_entries_index = I915_MOCS_PTE; + if (IS_DG2(i915)) { + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) { + table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax); + table->table = dg2_mocs_table_g10_ax; + } else { + table->size = ARRAY_SIZE(dg2_mocs_table); + table->table = dg2_mocs_table; + } + table->uc_index = 1; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; + table->unused_entries_index = 3; + } else if (IS_XEHPSDV(i915)) { + table->size = ARRAY_SIZE(xehpsdv_mocs_table); + table->table = xehpsdv_mocs_table; + table->uc_index = 2; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; + table->unused_entries_index = 5; + } else if (IS_DG1(i915)) { table->size = ARRAY_SIZE(dg1_mocs_table); table->table = dg1_mocs_table; table->n_entries = GEN9_NUM_MOCS_ENTRIES; - } else if (GRAPHICS_VER(i915) >= 12) { + table->uc_index = 1; + table->unused_entries_index = 5; + } else if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) { + /* For TGL/RKL, can't be changed now for ABI reasons */ table->size = ARRAY_SIZE(tgl_mocs_table); table->table = tgl_mocs_table; table->n_entries = GEN9_NUM_MOCS_ENTRIES; + table->uc_index = 3; + } else if (GRAPHICS_VER(i915) >= 12) { + table->size = ARRAY_SIZE(gen12_mocs_table); + table->table = gen12_mocs_table; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; + table->uc_index = 3; + table->unused_entries_index = 2; } else if (GRAPHICS_VER(i915) == 11) { table->size = ARRAY_SIZE(icl_mocs_table); table->table = icl_mocs_table; @@ -393,16 +504,16 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, } /* - * Get control_value from MOCS entry taking into account when it's not used: - * I915_MOCS_PTE's value is returned in this case. + * Get control_value from a MOCS entry. If the entry is not used, the + * control_value of the entry at unused_entries_index is returned instead + * (that index defaults to I915_MOCS_PTE on platforms that still have a + * PTE setting). */
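The rewritten comment above describes the fallback that get_entry_control() (and get_entry_l3cc() further down) now implement. A minimal standalone sketch of that lookup, with illustrative table contents rather than real MOCS values:

#include <stdint.h>
#include <stdio.h>

struct mocs_entry { uint32_t control_value; int used; };

struct mocs_table {
	unsigned int size;
	const struct mocs_entry *table;
	uint8_t unused_entries_index; /* per-platform fallback slot */
};

/* Same shape as get_entry_control(): out-of-range or unused indexes
 * resolve to the platform's designated fallback entry. */
static uint32_t get_entry_control(const struct mocs_table *t,
				  unsigned int index)
{
	if (index < t->size && t->table[index].used)
		return t->table[index].control_value;
	return t->table[t->unused_entries_index].control_value;
}

int main(void)
{
	const struct mocs_entry entries[] = {
		{ 0x10, 1 }, { 0x20, 1 }, { 0x30, 1 }, { 0xb0, 1 },
	};
	const struct mocs_table t = { 4, entries, 3 };

	printf("valid index 1 -> 0x%x\n", (unsigned)get_entry_control(&t, 1));
	printf("bogus index 9 -> 0x%x\n", (unsigned)get_entry_control(&t, 9));
	return 0;
}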
static u32 get_entry_control(const struct drm_i915_mocs_table *table, unsigned int index) { if (index < table->size && table->table[index].used) return table->table[index].control_value; - - return table->table[I915_MOCS_PTE].control_value; + return table->table[table->unused_entries_index].control_value; } #define for_each_mocs(mocs, t, i) \ @@ -417,6 +528,8 @@ static void __init_mocs_table(struct intel_uncore *uncore, unsigned int i; u32 mocs; + drm_WARN_ONCE(&uncore->i915->drm, !table->unused_entries_index, + "Unused entries index should have been defined\n"); for_each_mocs(mocs, table, i) intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs); } @@ -443,16 +556,16 @@ static void init_mocs_table(struct intel_engine_cs *engine, } /* - * Get l3cc_value from MOCS entry taking into account when it's not used: - * I915_MOCS_PTE's value is returned in this case. + * Get l3cc_value from a MOCS entry. If the entry is not used, the + * l3cc_value of the entry at unused_entries_index is returned instead + * (that index defaults to I915_MOCS_PTE on platforms that still have a + * PTE setting). */ static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table, unsigned int index) { if (index < table->size && table->table[index].used) return table->table[index].l3cc_value; - - return table->table[I915_MOCS_PTE].l3cc_value; + return table->table[table->unused_entries_index].l3cc_value; } static u32 l3cc_combine(u16 low, u16 high) @@ -468,10 +581,9 @@ static u32 l3cc_combine(u16 low, u16 high) 0; \ i++) -static void init_l3cc_table(struct intel_engine_cs *engine, +static void init_l3cc_table(struct intel_uncore *uncore, const struct drm_i915_mocs_table *table) { - struct intel_uncore *uncore = engine->uncore; unsigned int i; u32 l3cc; @@ -496,7 +608,7 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) init_mocs_table(engine, &table); if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS) - init_l3cc_table(engine, &table); + init_l3cc_table(engine->uncore, &table); } static u32 global_mocs_offset(void) @@ -504,6 +616,14 @@ static u32 global_mocs_offset(void) return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0)); } +void intel_set_mocs_index(struct intel_gt *gt) +{ + struct drm_i915_mocs_table table; + + get_mocs_settings(gt->i915, &table); + gt->mocs.uc_index = table.uc_index; +} + void intel_mocs_init(struct intel_gt *gt) { struct drm_i915_mocs_table table; @@ -515,6 +635,14 @@ void intel_mocs_init(struct intel_gt *gt) flags = get_mocs_settings(gt->i915, &table); if (flags & HAS_GLOBAL_MOCS) __init_mocs_table(gt->uncore, &table, global_mocs_offset()); + + /* + * Initialize the L3CC table as part of MOCS initialization to make + * sure the LNCFCMOCSx registers are programmed for the subsequent + * memory transactions, including GuC transactions + */ + if (flags & HAS_RENDER_L3CC) + init_l3cc_table(gt->uncore, &table); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index d83274f5163b..76db827210c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -36,5 +36,6 @@ struct intel_gt; void intel_mocs_init(struct intel_gt *gt); void intel_mocs_init_engine(struct intel_engine_cs *engine); +void intel_set_mocs_index(struct intel_gt *gt); #endif diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index 886060f7e6fc..4396bfd630d8 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -155,19 +155,20 @@ 
int i915_ppgtt_init_hw(struct intel_gt *gt) } static struct i915_ppgtt * -__ppgtt_create(struct intel_gt *gt) +__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags) { if (GRAPHICS_VER(gt->i915) < 8) return gen6_ppgtt_create(gt); else - return gen8_ppgtt_create(gt); + return gen8_ppgtt_create(gt, lmem_pt_obj_flags); } -struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt) +struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt, + unsigned long lmem_pt_obj_flags) { struct i915_ppgtt *ppgtt; - ppgtt = __ppgtt_create(gt); + ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags); if (IS_ERR(ppgtt)) return ppgtt; @@ -298,7 +299,8 @@ int ppgtt_set_pages(struct i915_vma *vma) return 0; } -void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) +void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt, + unsigned long lmem_pt_obj_flags) { struct drm_i915_private *i915 = gt->i915; @@ -306,6 +308,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) ppgtt->vm.i915 = i915; ppgtt->vm.dma = i915->drm.dev; ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); + ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags; dma_resv_init(&ppgtt->vm._resv); i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 799d382eea79..c3155ee58689 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -9,8 +9,8 @@ #include "i915_vgpu.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_pcode.h" #include "intel_rc6.h" -#include "intel_sideband.h" /** * DOC: RC6 @@ -117,10 +117,17 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_EI_MODE(1); - pg_enable = - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; + /* Wa_16011777198 - Render powergating must remain disabled */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) + pg_enable = + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; + else + pg_enable = + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; if (GRAPHICS_VER(gt->i915) >= 12) { for (i = 0; i < I915_MAX_VCS; i++) diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index a74b72f50cc9..9ea49e0a27c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -32,7 +32,7 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem) mem->remap_addr = dma_map_resource(i915->drm.dev, mem->region.start, mem->fake_mappable.size, - PCI_DMA_BIDIRECTIONAL, + DMA_BIDIRECTIONAL, DMA_ATTR_FORCE_CONTIGUOUS); if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) { drm_mm_remove_node(&mem->fake_mappable); @@ -62,16 +62,20 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem) dma_unmap_resource(mem->i915->drm.dev, mem->remap_addr, mem->fake_mappable.size, - PCI_DMA_BIDIRECTIONAL, + DMA_BIDIRECTIONAL, DMA_ATTR_FORCE_CONTIGUOUS); } -static void +static int region_lmem_release(struct intel_memory_region *mem) { - intel_region_ttm_fini(mem); + int ret; + + ret = intel_region_ttm_fini(mem); io_mapping_fini(&mem->iomap); release_fake_lmem_bar(mem); + + return ret; } static int @@ -158,7 +162,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt) static bool get_legacy_lowmem_region(struct intel_uncore *uncore, u64 *start, u32 *size) { - if 
(!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0)) + if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0)) return false; *start = 0; @@ -231,7 +235,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) return mem; err_region_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 91200c43951f..7be0002d9d70 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -6,7 +6,7 @@ #include <linux/sched/mm.h> #include <linux/stop_machine.h> -#include "display/intel_display_types.h" +#include "display/intel_display.h" #include "display/intel_overlay.h" #include "gem/i915_gem_context.h" @@ -297,13 +297,6 @@ static int gen6_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - static const u32 hw_engine_mask[] = { - [RCS0] = GEN6_GRDOM_RENDER, - [BCS0] = GEN6_GRDOM_BLT, - [VCS0] = GEN6_GRDOM_MEDIA, - [VCS1] = GEN8_GRDOM_MEDIA2, - [VECS0] = GEN6_GRDOM_VECS, - }; struct intel_engine_cs *engine; u32 hw_mask; @@ -314,8 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt, hw_mask = 0; for_each_engine_masked(engine, gt, engine_mask, tmp) { - GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); - hw_mask |= hw_engine_mask[engine->id]; + hw_mask |= engine->reset_domain; } } @@ -492,22 +484,6 @@ static int gen11_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - static const u32 hw_engine_mask[] = { - [RCS0] = GEN11_GRDOM_RENDER, - [BCS0] = GEN11_GRDOM_BLT, - [VCS0] = GEN11_GRDOM_MEDIA, - [VCS1] = GEN11_GRDOM_MEDIA2, - [VCS2] = GEN11_GRDOM_MEDIA3, - [VCS3] = GEN11_GRDOM_MEDIA4, - [VCS4] = GEN11_GRDOM_MEDIA5, - [VCS5] = GEN11_GRDOM_MEDIA6, - [VCS6] = GEN11_GRDOM_MEDIA7, - [VCS7] = GEN11_GRDOM_MEDIA8, - [VECS0] = GEN11_GRDOM_VECS, - [VECS1] = GEN11_GRDOM_VECS2, - [VECS2] = GEN11_GRDOM_VECS3, - [VECS3] = GEN11_GRDOM_VECS4, - }; struct intel_engine_cs *engine; intel_engine_mask_t tmp; u32 reset_mask, unlock_mask = 0; @@ -518,8 +494,7 @@ static int gen11_reset_engines(struct intel_gt *gt, } else { reset_mask = 0; for_each_engine_masked(engine, gt, engine_mask, tmp) { - GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); - reset_mask |= hw_engine_mask[engine->id]; + reset_mask |= engine->reset_domain; ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask); if (ret) goto sfc_unlock; @@ -1367,20 +1342,27 @@ void intel_gt_handle_error(struct intel_gt *gt, /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */ synchronize_rcu_expedited(); - /* Prevent any other reset-engine attempt. */ - for_each_engine(engine, gt, tmp) { - while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags)) - wait_on_bit(>->reset.flags, - I915_RESET_ENGINE + engine->id, - TASK_UNINTERRUPTIBLE); + /* + * Prevent any other reset-engine attempt. We don't do this for GuC + * submission, as the GuC owns the per-engine reset, not the i915. 
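The hunks above drop the per-generation hw_engine_mask[] lookup tables in favour of a per-engine reset_domain field, so the gen6 and gen11 paths collapse into the same OR-reduction. A hedged standalone sketch of that reduction, with made-up domain bits standing in for the real GRDOM values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative domain bits; the real GRDOM values live in i915_reg.h. */
enum { DOM_RENDER = 1u << 1, DOM_BLT = 1u << 2, DOM_MEDIA = 1u << 3 };

struct engine { const char *name; uint32_t reset_domain; };

/* Equivalent of the new loop body: each engine carries its own reset
 * domain, so no per-generation lookup table (or GEM_BUG_ON bounds
 * check) is needed. */
static uint32_t reset_mask(const struct engine *e, int n, uint32_t engine_mask)
{
	uint32_t hw_mask = 0;

	for (int i = 0; i < n; i++)
		if (engine_mask & (1u << i))
			hw_mask |= e[i].reset_domain;
	return hw_mask;
}

int main(void)
{
	const struct engine engines[] = {
		{ "rcs0", DOM_RENDER }, { "bcs0", DOM_BLT }, { "vcs0", DOM_MEDIA },
	};

	printf("mask(rcs0|vcs0) = 0x%x\n",
	       (unsigned)reset_mask(engines, 3, 0x5)); /* 0xa */
	return 0;
}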
+ */ + if (!intel_uc_uses_guc_submission(>->uc)) { + for_each_engine(engine, gt, tmp) { + while (test_and_set_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags)) + wait_on_bit(>->reset.flags, + I915_RESET_ENGINE + engine->id, + TASK_UNINTERRUPTIBLE); + } } intel_gt_reset_global(gt, engine_mask, msg); - for_each_engine(engine, gt, tmp) - clear_bit_unlock(I915_RESET_ENGINE + engine->id, - >->reset.flags); + if (!intel_uc_uses_guc_submission(>->uc)) { + for_each_engine(engine, gt, tmp) + clear_bit_unlock(I915_RESET_ENGINE + engine->id, + >->reset.flags); + } clear_bit_unlock(I915_RESET_BACKOFF, >->reset.flags); smp_mb__after_atomic(); wake_up_all(>->reset.queue); @@ -1441,6 +1423,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > I915_WEDGED_ON_INIT); intel_gt_set_wedged(gt); + i915_disable_error_state(gt->i915, -ENODEV); set_bit(I915_WEDGED_ON_INIT, >->reset.flags); /* Wedged on init is non-recoverable */ @@ -1450,6 +1433,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) void intel_gt_set_wedged_on_fini(struct intel_gt *gt) { intel_gt_set_wedged(gt); + i915_disable_error_state(gt->i915, -ENODEV); set_bit(I915_WEDGED_ON_FINI, >->reset.flags); intel_gt_retire_requests(gt); /* cleanup any wedged requests */ } diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 7c4d5158e03b..2fdd52b62092 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -112,7 +112,8 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) struct drm_i915_gem_object *obj; struct i915_vma *vma; - obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE); + obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE | + I915_BO_ALLOC_PM_VOLATILE); if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt)) obj = i915_gem_object_create_stolen(i915, size); if (IS_ERR(obj)) diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 2958e2fae380..3e6fac0340ef 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -17,6 +17,7 @@ #include "intel_ring.h" #include "shmem_utils.h" #include "intel_engine_heartbeat.h" +#include "intel_engine_pm.h" /* Rough estimate of the typical request size, performing a flush, * set-context and then emitting the batch. 
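The test_and_set_bit()/wait_on_bit() loop above serializes reset-engine attempts on the shared gt->reset.flags word, and is now skipped when GuC submission owns the per-engine reset. For illustration only, here is a userspace analogue of the same lock-bit pattern using C11 atomics; a busy-wait stands in for the kernel's sleeping wait_on_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong reset_flags; /* analogue of gt->reset.flags */

static void lock_engine_reset(int bit)
{
	unsigned long mask = 1ul << bit;

	/* test_and_set_bit() analogue: loop until we owned the transition. */
	while (atomic_fetch_or(&reset_flags, mask) & mask)
		; /* someone else owns this engine's reset; wait */
}

static void unlock_engine_reset(int bit)
{
	atomic_fetch_and(&reset_flags, ~(1ul << bit)); /* clear_bit_unlock() */
}

int main(void)
{
	lock_engine_reset(0);
	printf("flags after lock:   %#lx\n", atomic_load(&reset_flags));
	unlock_engine_reset(0);
	printf("flags after unlock: %#lx\n", atomic_load(&reset_flags));
	return 0;
}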
@@ -291,7 +292,9 @@ static void xcs_sanitize(struct intel_engine_cs *engine) sanitize_hwsp(engine); /* And scrub the dirty cachelines for the HWSP */ - clflush_cache_range(engine->status_page.addr, PAGE_SIZE); + drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE); + + intel_engine_reset_pinned_contexts(engine); } static void reset_prepare(struct intel_engine_cs *engine) @@ -1265,7 +1268,7 @@ static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine) int size, err; if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS) - return 0; + return NULL; err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */); if (err < 0) @@ -1354,7 +1357,7 @@ retry: err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww); if (!err && gen7_wa_vma) err = i915_gem_object_lock(gen7_wa_vma->obj, &ww); - if (!err && engine->legacy.ring->vma->obj) + if (!err) err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww); if (!err) err = intel_timeline_pin(timeline, &ww); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index d812b27835f8..07ff7ba7b2b7 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -11,8 +11,9 @@ #include "intel_gt_clock_utils.h" #include "intel_gt_irq.h" #include "intel_gt_pm_irq.h" +#include "intel_pcode.h" #include "intel_rps.h" -#include "intel_sideband.h" +#include "vlv_sideband.h" #include "../../../platform/x86/intel_ips.h" #define BUSY_MAX_EI 20u /* ms */ @@ -882,8 +883,6 @@ void intel_rps_park(struct intel_rps *rps) if (!intel_rps_is_enabled(rps)) return; - GEM_BUG_ON(atomic_read(&rps->num_waiters)); - if (!intel_rps_clear_active(rps)) return; @@ -937,8 +936,70 @@ void intel_rps_park(struct intel_rps *rps) GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } +u32 intel_rps_get_boost_frequency(struct intel_rps *rps) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + return slpc->boost_freq; + } else { + return intel_gpu_freq(rps, rps->boost_freq); + } +} + +static int rps_set_boost_freq(struct intel_rps *rps, u32 val) +{ + bool boost = false; + + /* Validate against (static) hardware limits */ + val = intel_freq_opcode(rps, val); + if (val < rps->min_freq || val > rps->max_freq) + return -EINVAL; + + mutex_lock(&rps->lock); + if (val != rps->boost_freq) { + rps->boost_freq = val; + boost = atomic_read(&rps->num_waiters); + } + mutex_unlock(&rps->lock); + if (boost) + schedule_work(&rps->work); + + return 0; +} + +int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + return intel_guc_slpc_set_boost_freq(slpc, freq); + } else { + return rps_set_boost_freq(rps, freq); + } +} + +void intel_rps_dec_waiters(struct intel_rps *rps) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + intel_guc_slpc_dec_waiters(slpc); + } else { + atomic_dec(&rps->num_waiters); + } +} + void intel_rps_boost(struct i915_request *rq) { + struct intel_guc_slpc *slpc; + if (i915_request_signaled(rq) || i915_request_has_waitboost(rq)) return; @@ -946,6 +1007,16 @@ void intel_rps_boost(struct i915_request *rq) if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + /* Return if old value is non zero */ + if (!atomic_fetch_inc(&slpc->num_waiters)) + schedule_work(&slpc->boost_work); + + 
return; + } + if (atomic_fetch_inc(&rps->num_waiters)) return; @@ -996,20 +1067,16 @@ int intel_rps_set(struct intel_rps *rps, u8 val) static void gen6_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - struct intel_uncore *uncore = rps_to_uncore(rps); + u32 rp_state_cap = intel_rps_read_state_cap(rps); /* All of these values are in units of 50MHz */ /* static values from HW: RP0 > RP1 > RPn (min_freq) */ if (IS_GEN9_LP(i915)) { - u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); - rps->rp0_freq = (rp_state_cap >> 16) & 0xff; rps->rp1_freq = (rp_state_cap >> 8) & 0xff; rps->min_freq = (rp_state_cap >> 0) & 0xff; } else { - u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); - rps->rp0_freq = (rp_state_cap >> 0) & 0xff; rps->rp1_freq = (rp_state_cap >> 8) & 0xff; rps->min_freq = (rp_state_cap >> 16) & 0xff; @@ -1973,8 +2040,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps) u32 intel_rps_read_punit_req(struct intel_rps *rps) { struct intel_uncore *uncore = rps_to_uncore(rps); + struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; + intel_wakeref_t wakeref; + u32 freq = 0; - return intel_uncore_read(uncore, GEN6_RPNSWREQ); + with_intel_runtime_pm_if_in_use(rpm, wakeref) + freq = intel_uncore_read(uncore, GEN6_RPNSWREQ); + + return freq; } static u32 intel_rps_get_req(u32 pureq) @@ -2140,6 +2213,19 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val) return set_min_freq(rps, val); } +u32 intel_rps_read_state_cap(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + if (IS_XEHPSDV(i915)) + return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP); + else if (IS_GEN9_LP(i915)) + return intel_uncore_read(uncore, BXT_RP_STATE_CAP); + else + return intel_uncore_read(uncore, GEN6_RP_STATE_CAP); +} + /* External interface for intel_ips.ko */ static struct drm_i915_private __rcu *ips_mchdev; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index 4213bcce1667..aee12f37d38a 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -23,6 +23,9 @@ void intel_rps_disable(struct intel_rps *rps); void intel_rps_park(struct intel_rps *rps); void intel_rps_unpark(struct intel_rps *rps); void intel_rps_boost(struct i915_request *rq); +void intel_rps_dec_waiters(struct intel_rps *rps); +u32 intel_rps_get_boost_frequency(struct intel_rps *rps); +int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq); int intel_rps_set(struct intel_rps *rps, u8 val); void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); @@ -41,6 +44,7 @@ u32 intel_rps_get_rp1_frequency(struct intel_rps *rps); u32 intel_rps_get_rpn_frequency(struct intel_rps *rps); u32 intel_rps_read_punit_req(struct intel_rps *rps); u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps); +u32 intel_rps_read_state_cap(struct intel_rps *rps); void gen5_rps_irq_handler(struct intel_rps *rps); void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index bbd272943c3f..bdf09051b8a0 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -46,11 +46,11 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) } void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, - u32 ss_mask) + u8 *subslice_mask, u32 ss_mask) { int offset = 
slice * sseu->ss_stride; - memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride); + memcpy(&subslice_mask[offset], &ss_mask, sseu->ss_stride); } unsigned int @@ -100,14 +100,24 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu) return total; } -static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, - u8 s_en, u32 ss_en, u16 eu_en) +static u32 get_ss_stride_mask(struct sseu_dev_info *sseu, u8 s, u32 ss_en) +{ + u32 ss_mask; + + ss_mask = ss_en >> (s * sseu->max_subslices); + ss_mask &= GENMASK(sseu->max_subslices - 1, 0); + + return ss_mask; +} + +static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, u8 s_en, + u32 g_ss_en, u32 c_ss_en, u16 eu_en) { int s, ss; - /* ss_en represents entire subslice mask across all slices */ + /* g_ss_en/c_ss_en represent entire subslice mask across all slices */ GEM_BUG_ON(sseu->max_slices * sseu->max_subslices > - sizeof(ss_en) * BITS_PER_BYTE); + sizeof(g_ss_en) * BITS_PER_BYTE); for (s = 0; s < sseu->max_slices; s++) { if ((s_en & BIT(s)) == 0) @@ -115,7 +125,22 @@ static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, sseu->slice_mask |= BIT(s); - intel_sseu_set_subslices(sseu, s, ss_en); + /* + * XeHP introduces the concept of compute vs geometry DSS. To + * reduce variation between GENs around subslice usage, store a + * mask for both the geometry and compute enabled masks since + * userspace will need to be able to query these masks + * independently. Also compute a total enabled subslice count + * for the purposes of selecting subslices to use in a + * particular GEM context. + */ + intel_sseu_set_subslices(sseu, s, sseu->compute_subslice_mask, + get_ss_stride_mask(sseu, s, c_ss_en)); + intel_sseu_set_subslices(sseu, s, sseu->geometry_subslice_mask, + get_ss_stride_mask(sseu, s, g_ss_en)); + intel_sseu_set_subslices(sseu, s, sseu->subslice_mask, + get_ss_stride_mask(sseu, s, + g_ss_en | c_ss_en)); for (ss = 0; ss < sseu->max_subslices; ss++) if (intel_sseu_has_subslice(sseu, s, ss)) @@ -129,7 +154,7 @@ static void gen12_sseu_info_init(struct intel_gt *gt) { struct sseu_dev_info *sseu = >->info.sseu; struct intel_uncore *uncore = gt->uncore; - u32 dss_en; + u32 g_dss_en, c_dss_en = 0; u16 eu_en = 0; u8 eu_en_fuse; u8 s_en; @@ -160,7 +185,9 @@ static void gen12_sseu_info_init(struct intel_gt *gt) s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; - dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE); + g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE); + if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) + c_dss_en = intel_uncore_read(uncore, GEN12_GT_COMPUTE_DSS_ENABLE); /* one bit per pair of EUs */ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) @@ -173,7 +200,7 @@ static void gen12_sseu_info_init(struct intel_gt *gt) if (eu_en_fuse & BIT(eu)) eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1); - gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en); + gen11_compute_sseu_info(sseu, s_en, g_dss_en, c_dss_en, eu_en); /* TGL only supports slice-level power gating */ sseu->has_slice_pg = 1; @@ -199,7 +226,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt) eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); - gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en); + gen11_compute_sseu_info(sseu, s_en, ss_en, 0, eu_en); /* ICL has no power gating restrictions. 
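get_ss_stride_mask() above carves one slice's subslice bits out of a fuse mask that packs all slices together; the result is then stored separately for the geometry, compute, and combined masks. The extraction step in standalone form, with illustrative slice/subslice counts:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

/* Same logic as get_ss_stride_mask(): shift the packed enable mask down
 * to the requested slice and keep only that slice's subslice bits. */
static uint32_t ss_mask_for_slice(uint32_t ss_en, unsigned int slice,
				  unsigned int max_subslices)
{
	return (ss_en >> (slice * max_subslices)) &
	       GENMASK(max_subslices - 1, 0);
}

int main(void)
{
	/* 2 slices x 4 subslices, packed low-to-high (illustrative). */
	uint32_t ss_en = 0x96; /* slice 0: 0110b, slice 1: 1001b */

	printf("slice 0 -> 0x%x\n", (unsigned)ss_mask_for_slice(ss_en, 0, 4));
	printf("slice 1 -> 0x%x\n", (unsigned)ss_mask_for_slice(ss_en, 1, 4));
	return 0;
}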
*/ sseu->has_slice_pg = 1; @@ -240,7 +267,7 @@ static void cherryview_sseu_info_init(struct intel_gt *gt) sseu_set_eus(sseu, 0, 1, ~disabled_mask); } - intel_sseu_set_subslices(sseu, 0, subslice_mask); + intel_sseu_set_subslices(sseu, 0, sseu->subslice_mask, subslice_mask); sseu->eu_total = compute_eu_total(sseu); @@ -296,7 +323,8 @@ static void gen9_sseu_info_init(struct intel_gt *gt) /* skip disabled slice */ continue; - intel_sseu_set_subslices(sseu, s, subslice_mask); + intel_sseu_set_subslices(sseu, s, sseu->subslice_mask, + subslice_mask); eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s)); for (ss = 0; ss < sseu->max_subslices; ss++) { @@ -408,7 +436,8 @@ static void bdw_sseu_info_init(struct intel_gt *gt) /* skip disabled slice */ continue; - intel_sseu_set_subslices(sseu, s, subslice_mask); + intel_sseu_set_subslices(sseu, s, sseu->subslice_mask, + subslice_mask); for (ss = 0; ss < sseu->max_subslices; ss++) { u8 eu_disabled_mask; @@ -485,10 +514,9 @@ static void hsw_sseu_info_init(struct intel_gt *gt) } fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1); - switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { + switch (REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1)) { default: - MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >> - HSW_F1_EU_DIS_SHIFT); + MISSING_CASE(REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1)); fallthrough; case HSW_F1_EU_DIS_10EUS: sseu->eu_per_subslice = 10; @@ -506,7 +534,8 @@ static void hsw_sseu_info_init(struct intel_gt *gt) sseu->eu_per_subslice); for (s = 0; s < sseu->max_slices; s++) { - intel_sseu_set_subslices(sseu, s, subslice_mask); + intel_sseu_set_subslices(sseu, s, sseu->subslice_mask, + subslice_mask); for (ss = 0; ss < sseu->max_subslices; ss++) { sseu_set_eus(sseu, s, ss, diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 22fef98887c0..60882a74741e 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -26,9 +26,14 @@ struct drm_printer; #define GEN_DSS_PER_CSLICE 8 #define GEN_DSS_PER_MSLICE 8 +#define GEN_MAX_GSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_GSLICE) +#define GEN_MAX_CSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_CSLICE) + struct sseu_dev_info { u8 slice_mask; u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; + u8 geometry_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; + u8 compute_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE]; u16 eu_total; u8 eu_per_subslice; @@ -78,6 +83,10 @@ intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice, u8 mask; int ss_idx = subslice / BITS_PER_BYTE; + if (slice >= sseu->max_slices || + subslice >= sseu->max_subslices) + return false; + GEM_BUG_ON(ss_idx >= sseu->ss_stride); mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx]; @@ -97,7 +106,7 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, - u32 ss_mask); + u8 *subslice_mask, u32 ss_mask); void intel_sseu_info_init(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c index 1ba8b7da9d37..8bb3a91dad82 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c @@ -4,9 +4,9 @@ * Copyright © 2020 Intel Corporation */ -#include "debugfs_gt.h" -#include 
"intel_sseu_debugfs.h" #include "i915_drv.h" +#include "intel_gt_debugfs.h" +#include "intel_sseu_debugfs.h" static void sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, u8 *to_mask) @@ -282,7 +282,7 @@ static int sseu_status_show(struct seq_file *m, void *unused) return intel_sseu_status(m, gt); } -DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status); static int rcs_topology_show(struct seq_file *m, void *unused) { @@ -293,11 +293,11 @@ static int rcs_topology_show(struct seq_file *m, void *unused) return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rcs_topology); void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root) { - static const struct debugfs_gt_file files[] = { + static const struct intel_gt_debugfs_file files[] = { { "sseu_status", &sseu_status_fops, NULL }, { "rcs_topology", &rcs_topology_fops, NULL }, }; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 1257f4f11e66..438bbc7b8147 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -64,7 +64,7 @@ intel_timeline_pin_map(struct intel_timeline *timeline) timeline->hwsp_map = vaddr; timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES); - clflush(vaddr + ofs); + drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES); return 0; } @@ -225,7 +225,7 @@ void intel_timeline_reset_seqno(const struct intel_timeline *tl) memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno)); WRITE_ONCE(*hwsp_seqno, tl->seqno); - clflush(hwsp_seqno); + drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES); } void intel_timeline_enter(struct intel_timeline *tl) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index aae609d7d85d..3113266c286e 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -482,7 +482,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER)) + if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER)) wa_masked_en(wal, COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -560,6 +560,22 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* * These settings aren't actually workarounds, but general tuning settings that + * need to be programmed on dg2 platform. + */ +static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, + REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)); + wa_add(wal, + FF_MODE2, + FF_MODE2_TDS_TIMER_MASK, + FF_MODE2_TDS_TIMER_128, + 0, false); +} + +/* + * These settings aren't actually workarounds, but general tuning settings that * need to be programmed on several platforms. 
*/ static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine, @@ -621,13 +637,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0, false); - - /* - * Wa_14012131227:dg1 - * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p - */ - wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1, - GEN9_RHWO_OPTIMIZATION_DISABLE); } static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -644,6 +653,108 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE); } +static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + dg2_ctx_gt_tuning_init(engine, wal); + + /* Wa_16011186671:dg2_g11 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) { + wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH); + wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_14010469329:dg2_g10 */ + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE); + + /* + * Wa_22010465075:dg2_g10 + * Wa_22010613112:dg2_g10 + * Wa_14010698770:dg2_g10 + */ + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); + } + + /* Wa_16013271637:dg2 */ + wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1, + MSC_MSAA_REODER_BUF_BYPASS_DISABLE); + + /* Wa_22012532006:dg2 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) + wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7, + DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA); +} + +static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + /* + * This is a "fake" workaround defined by software to ensure we + * maintain reliable, backward-compatible behavior for userspace with + * regards to how nested MI_BATCH_BUFFER_START commands are handled. + * + * The per-context setting of MI_MODE[12] determines whether the bits + * of a nested MI_BATCH_BUFFER_START instruction should be interpreted + * in the traditional manner or whether they should instead use a new + * tgl+ meaning that breaks backward compatibility, but allows nesting + * into 3rd-level batchbuffers. When this new capability was first + * added in TGL, it remained off by default unless a context + * intentionally opted in to the new behavior. However Xe_HPG now + * flips this on by default and requires that we explicitly opt out if + * we don't want the new behavior. + * + * From a SW perspective, we want to maintain the backward-compatible + * behavior for userspace, so we'll apply a fake workaround to set it + * back to the legacy behavior on platforms where the hardware default + * is to break compatibility. At the moment there is no Linux + * userspace that utilizes third-level batchbuffers, so this will avoid + * userspace needing to make any changes; using the legacy + * meaning is the correct thing to do. If/when we have userspace + * consumers that want to utilize third-level batch nesting, we can + * provide a context parameter to allow them to opt-in. + */ + wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN); +} + +static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + u8 mocs; + + /* + * Some blitter commands do not have a field for MOCS; those + * commands use the MOCS index pointed to by BLIT_CCTL. 
+ * The BLIT_CCTL register needs to be programmed to un-cached. + */ + if (engine->class == COPY_ENGINE_CLASS) { + mocs = engine->gt->mocs.uc_index; + wa_write_clr_set(wal, + BLIT_CCTL(engine->mmio_base), + BLIT_CCTL_MASK, + BLIT_CCTL_MOCS(mocs, mocs)); + } +} + +/* + * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround + * defined by the hardware team; it programs general context registers. + * Adding that context register programming to the context workaround list + * allows us to use the wa framework for proper application and validation. + */ +static void +gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) + fakewa_disable_nestedbb_mode(engine, wal); + + gen12_ctx_gt_mocs_init(engine, wal); +} + static void __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, struct i915_wa_list *wal, @@ -651,12 +762,24 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, { struct drm_i915_private *i915 = engine->i915; - if (engine->class != RENDER_CLASS) - return; - wa_init_start(wal, name, engine->name); - if (IS_DG1(i915)) + /* Applies to all engines */ + /* + * Fake workarounds are not actual workarounds, but + * programming of context registers using the workaround framework. + */ + if (GRAPHICS_VER(i915) >= 12) + gen12_ctx_gt_fake_wa_init(engine, wal); + + if (engine->class != RENDER_CLASS) + goto done; + + if (IS_DG2(i915)) + dg2_ctx_workarounds_init(engine, wal); + else if (IS_XEHPSDV(i915)) + ; /* noop; none at this time */ + else if (IS_DG1(i915)) dg1_ctx_workarounds_init(engine, wal); else if (GRAPHICS_VER(i915) == 12) gen12_ctx_workarounds_init(engine, wal); @@ -685,6 +808,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, else MISSING_CASE(GRAPHICS_VER(i915)); +done: wa_init_finish(wal); } @@ -729,7 +853,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq) } static void -gen4_gt_workarounds_init(struct drm_i915_private *i915, +gen4_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */ @@ -737,29 +861,29 @@ gen4_gt_workarounds_init(struct drm_i915_private *i915, } static void -g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - gen4_gt_workarounds_init(i915, wal); + gen4_gt_workarounds_init(gt, wal); /* WaDisableRenderCachePipelinedFlush:g4x,ilk */ wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE); } static void -ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - g4x_gt_workarounds_init(i915, wal); + g4x_gt_workarounds_init(gt, wal); wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED); } static void -snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { } static void -ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
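Both the BLIT_CCTL programming above and the other wa_write_clr_set() users boil down to a masked read-modify-write when the workaround list is applied to the hardware. A minimal sketch of that update; the mask and MOCS field layout below are stand-ins, not the real BLIT_CCTL register layout:

#include <stdint.h>
#include <stdio.h>

/* wa_write_clr_set(reg, clr, set) semantics at apply time. */
static uint32_t apply_wa(uint32_t reg_val, uint32_t clr, uint32_t set)
{
	return (reg_val & ~clr) | set;
}

int main(void)
{
	uint32_t blit_cctl = 0xdeadbeef; /* pretend current register value */
	uint32_t mask = 0x0000ffff;      /* stand-in for BLIT_CCTL_MASK */
	uint32_t mocs = 0x00000303;      /* stand-in for BLIT_CCTL_MOCS(3, 3) */

	printf("0x%08x -> 0x%08x\n", (unsigned)blit_cctl,
	       (unsigned)apply_wa(blit_cctl, mask, mocs)); /* 0xdead0303 */
	return 0;
}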
wa_masked_dis(wal, @@ -775,7 +899,7 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) } static void -vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { /* WaForceL3Serialization:vlv */ wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE); @@ -788,7 +912,7 @@ vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) } static void -hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { /* L3 caching of data atomics doesn't work -- disable it. */ wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); @@ -803,8 +927,51 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) } static void -gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + const struct sseu_dev_info *sseu = &i915->gt.info.sseu; + unsigned int slice, subslice; + u32 mcr, mcr_mask; + + GEM_BUG_ON(GRAPHICS_VER(i915) != 9); + + /* + * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml + * Before any MMIO read into slice/subslice specific registers, MCR + * packet control register needs to be programmed to point to any + * enabled s/ss pair. Otherwise, incorrect values will be returned. + * This means each subsequent MMIO read will be forwarded to a + * specific s/ss combination, but this is OK since these registers + * are consistent across s/ss in almost all cases. On the rare + * occasions, such as INSTDONE, where this value is dependent + * on s/ss combo, the read should be done with read_subslice_reg. 
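The MCR steering computed in gen9_wa_init_mcr() just below picks the first enabled slice/subslice pair with ffs() so that steered MMIO reads always land on live hardware. The selection step in standalone form; the fuse masks here are illustrative:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned int slice_mask = 0x2;    /* slice 0 fused off */
	unsigned int subslice_mask = 0xc; /* subslices 2-3 enabled */

	/* ffs() returns a 1-based bit index, hence the -1 adjustments
	 * mirroring the kernel code below. */
	int slice = ffs(slice_mask) - 1;
	int subslice = ffs(subslice_mask) - 1;

	printf("steer reads to slice %d, subslice %d\n", slice, subslice);
	return 0;
}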
+ */ + slice = ffs(sseu->slice_mask) - 1; + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = ffs(intel_sseu_get_subslices(sseu, slice)); + GEM_BUG_ON(!subslice); + subslice--; + + /* + * We use GEN8_MCR..() macros to calculate the |mcr| value for + * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads + */ + mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + + drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr); + + wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); +} + +static void +gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { + struct drm_i915_private *i915 = gt->i915; + + /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */ + gen9_wa_init_mcr(i915, wal); + /* WaDisableKillLogic:bxt,skl,kbl */ if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915)) wa_write_or(wal, @@ -829,9 +996,9 @@ gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal } static void -skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - gen9_gt_workarounds_init(i915, wal); + gen9_gt_workarounds_init(gt, wal); /* WaDisableGafsUnitClkGating:skl */ wa_write_or(wal, @@ -839,19 +1006,19 @@ skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); /* WaInPlaceDecompressionHang:skl */ - if (IS_SKL_GT_STEP(i915, STEP_A0, STEP_H0)) + if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0)) wa_write_or(wal, GEN9_GAMT_ECO_REG_RW_IA, GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } static void -kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - gen9_gt_workarounds_init(i915, wal); + gen9_gt_workarounds_init(gt, wal); /* WaDisableDynamicCreditSharing:kbl */ - if (IS_KBL_GT_STEP(i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0)) wa_write_or(wal, GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); @@ -868,15 +1035,15 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) } static void -glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - gen9_gt_workarounds_init(i915, wal); + gen9_gt_workarounds_init(gt, wal); } static void -cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - gen9_gt_workarounds_init(i915, wal); + gen9_gt_workarounds_init(gt, wal); /* WaDisableGafsUnitClkGating:cfl */ wa_write_or(wal, @@ -901,21 +1068,21 @@ static void __set_mcr_steering(struct i915_wa_list *wal, wa_write_clr_set(wal, steering_reg, mcr_mask, mcr); } -static void __add_mcr_wa(struct drm_i915_private *i915, struct i915_wa_list *wal, +static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal, unsigned int slice, unsigned int subslice) { - drm_dbg(&i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice); + drm_dbg(>->i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice); __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice); } static void -icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) +icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) { - const struct sseu_dev_info *sseu = 
&i915->gt.info.sseu; + const struct sseu_dev_info *sseu = >->info.sseu; unsigned int slice, subslice; - GEM_BUG_ON(GRAPHICS_VER(i915) < 11); + GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11); GEM_BUG_ON(hweight8(sseu->slice_mask) > 1); slice = 0; @@ -935,16 +1102,15 @@ icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) * then we can just rely on the default steering and won't need to * worry about explicitly re-steering L3BANK reads later. */ - if (i915->gt.info.l3bank_mask & BIT(subslice)) - i915->gt.steering_table[L3BANK] = NULL; + if (gt->info.l3bank_mask & BIT(subslice)) + gt->steering_table[L3BANK] = NULL; - __add_mcr_wa(i915, wal, slice, subslice); + __add_mcr_wa(gt, wal, slice, subslice); } static void xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) { - struct drm_i915_private *i915 = gt->i915; const struct sseu_dev_info *sseu = >->info.sseu; unsigned long slice, subslice = 0, slice_mask = 0; u64 dss_mask = 0; @@ -1008,7 +1174,7 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) WARN_ON(subslice > GEN_DSS_PER_GSLICE); WARN_ON(dss_mask >> (slice * GEN_DSS_PER_GSLICE) == 0); - __add_mcr_wa(i915, wal, slice, subslice); + __add_mcr_wa(gt, wal, slice, subslice); /* * SQIDI ranges are special because they use different steering @@ -1024,9 +1190,11 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) } static void -icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - icl_wa_init_mcr(i915, wal); + struct drm_i915_private *i915 = gt->i915; + + icl_wa_init_mcr(gt, wal); /* WaModifyGamTlbPartitioning:icl */ wa_write_clr_set(wal, @@ -1056,9 +1224,18 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_L3_COH_PIPE); + /* Wa_1407352427:icl,ehl */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + PSDUNIT_CLKGATE_DIS); + + /* Wa_1406680159:icl,ehl */ + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); + /* Wa_1607087056:icl,ehl,jsl */ if (IS_ICELAKE(i915) || - IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0)) + IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1077,10 +1254,9 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) * the engine-specific workaround list. 
*/ static void -wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal) +wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal) { struct intel_engine_cs *engine; - struct intel_gt *gt = &i915->gt; int id; for_each_engine(engine, gt, id) { @@ -1094,48 +1270,51 @@ wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal) } static void -gen12_gt_workarounds_init(struct drm_i915_private *i915, - struct i915_wa_list *wal) +gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - icl_wa_init_mcr(i915, wal); + icl_wa_init_mcr(gt, wal); /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */ - wa_14011060649(i915, wal); + wa_14011060649(gt, wal); /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */ wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE); } static void -tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - gen12_gt_workarounds_init(i915, wal); + struct drm_i915_private *i915 = gt->i915; + + gen12_gt_workarounds_init(gt, wal); /* Wa_1409420604:tgl */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS); /* Wa_1607087056:tgl also know as BUG:1409180338 */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); /* Wa_1408615072:tgl[a0] */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); } static void -dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - gen12_gt_workarounds_init(i915, wal); + struct drm_i915_private *i915 = gt->i915; + + gen12_gt_workarounds_init(gt, wal); /* Wa_1607087056:dg1 */ - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1154,60 +1333,236 @@ dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) } static void -xehpsdv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) +xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - xehp_init_mcr(&i915->gt, wal); + struct drm_i915_private *i915 = gt->i915; + + xehp_init_mcr(gt, wal); + + /* Wa_1409757795:xehpsdv */ + wa_write_or(wal, SCCGCTL94DC, CG3DDISURB); + + /* Wa_18011725039:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) { + wa_masked_dis(wal, MLTICTXCTL, TDONRENDER); + wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH); + } + + /* Wa_16011155590:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + TSGUNIT_CLKGATE_DIS); + + /* Wa_14011780169:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) { + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, 
GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + } + + /* Wa_14012362059:xehpsdv */ + wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB); + + /* Wa_16012725990:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS); + + /* Wa_14011060649:xehpsdv */ + wa_14011060649(gt, wal); + + /* Wa_14014368820:xehpsdv */ + wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | + GLOBAL_INVALIDATION_MODE); } static void -gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) +dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { - if (IS_XEHPSDV(i915)) - xehpsdv_gt_workarounds_init(i915, wal); + struct intel_engine_cs *engine; + int id; + + xehp_init_mcr(gt, wal); + + /* Wa_14011060649:dg2 */ + wa_14011060649(gt, wal); + + /* + * Although there are per-engine instances of these registers, + * they technically exist outside the engine itself and are not + * impacted by engine resets. Furthermore, they're part of the + * GuC blacklist so trying to treat them as engine workarounds + * will result in GuC initialization failure and a wedged GPU. + */ + for_each_engine(engine, gt, id) { + if (engine->class != VIDEO_DECODE_CLASS) + continue; + + /* Wa_16010515920:dg2_g10 */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) + wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base), + ALNUNIT_CLKGATE_DIS); + } + + if (IS_DG2_G10(gt->i915)) { + /* Wa_22010523718:dg2 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + CG3DDISCFEG_CLKGATE_DIS); + + /* Wa_14011006942:dg2 */ + wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE, + DSS_ROUTER_CLKGATE_DIS); + } + + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_14010680813:dg2_g10 */ + wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS | + EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS); + + /* Wa_14010948348:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS); + + /* Wa_14011037102:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS); + + /* Wa_14011371254:dg2_g10 */ + wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS); + + /* Wa_14011431319:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + + /* Wa_14010569222:dg2_g10 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + GAMEDIA_CLKGATE_DIS); + + /* 
Wa_14011028019:dg2_g10 */
+ wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
+     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012362059:dg2 */
+ wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+
+ /* Wa_1509235366:dg2 */
+ wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+             GLOBAL_INVALIDATION_MODE);
+
+ /* Wa_14014830051:dg2 */
+ wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+ /*
+  * The following are not actually "workarounds" but rather
+  * recommended tuning settings documented in the bspec's
+  * performance guide section.
+  */
+ wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
+ wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS);
+}
+
+static void
+gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_DG2(i915))
+ dg2_gt_workarounds_init(gt, wal);
+ else if (IS_XEHPSDV(i915))
+ xehpsdv_gt_workarounds_init(gt, wal);
 else if (IS_DG1(i915))
- dg1_gt_workarounds_init(i915, wal);
+ dg1_gt_workarounds_init(gt, wal);
 else if (IS_TIGERLAKE(i915))
- tgl_gt_workarounds_init(i915, wal);
+ tgl_gt_workarounds_init(gt, wal);
 else if (GRAPHICS_VER(i915) == 12)
- gen12_gt_workarounds_init(i915, wal);
+ gen12_gt_workarounds_init(gt, wal);
 else if (GRAPHICS_VER(i915) == 11)
- icl_gt_workarounds_init(i915, wal);
+ icl_gt_workarounds_init(gt, wal);
 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
- cfl_gt_workarounds_init(i915, wal);
+ cfl_gt_workarounds_init(gt, wal);
 else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915, wal);
+ glk_gt_workarounds_init(gt, wal);
 else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915, wal);
+ kbl_gt_workarounds_init(gt, wal);
 else if (IS_BROXTON(i915))
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
 else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915, wal);
+ skl_gt_workarounds_init(gt, wal);
 else if (IS_HASWELL(i915))
- hsw_gt_workarounds_init(i915, wal);
+ hsw_gt_workarounds_init(gt, wal);
 else if (IS_VALLEYVIEW(i915))
- vlv_gt_workarounds_init(i915, wal);
+ vlv_gt_workarounds_init(gt, wal);
 else if (IS_IVYBRIDGE(i915))
- ivb_gt_workarounds_init(i915, wal);
+ ivb_gt_workarounds_init(gt, wal);
 else if (GRAPHICS_VER(i915) == 6)
- snb_gt_workarounds_init(i915, wal);
+ snb_gt_workarounds_init(gt, wal);
 else if (GRAPHICS_VER(i915) == 5)
- ilk_gt_workarounds_init(i915, wal);
+ ilk_gt_workarounds_init(gt, wal);
 else if (IS_G4X(i915))
- g4x_gt_workarounds_init(i915, wal);
+ g4x_gt_workarounds_init(gt, wal);
 else if (GRAPHICS_VER(i915) == 4)
- gen4_gt_workarounds_init(i915, wal);
+ gen4_gt_workarounds_init(gt, wal);
 else if (GRAPHICS_VER(i915) <= 8)
 ;
 else
 MISSING_CASE(GRAPHICS_VER(i915));
 }
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+void intel_gt_init_workarounds(struct intel_gt *gt)
 {
- struct i915_wa_list *wal = &i915->gt_wa_list;
+ struct i915_wa_list *wal = &gt->wa_list;
 wa_init_start(wal, "GT", "global");
- gt_init_workarounds(i915, wal);
+ gt_init_workarounds(gt, wal);
 wa_init_finish(wal);
 }
@@ -1278,7 +1633,7 @@ wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
 void intel_gt_apply_workarounds(struct intel_gt *gt)
 {
- wa_list_apply(gt, &gt->i915->gt_wa_list);
+ wa_list_apply(gt, &gt->wa_list);
 }
 static bool wa_list_verify(struct intel_gt *gt,
@@ -1310,7 +1665,7 @@ static bool wa_list_verify(struct intel_gt *gt,
 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
 {
- return wa_list_verify(gt, &gt->i915->gt_wa_list, from);
+ return wa_list_verify(gt, &gt->wa_list, from);
 }
 __maybe_unused
@@ -1436,7 +1791,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
 RING_FORCE_TO_NONPRIV_RANGE_4);
 }
-static void cml_whitelist_build(struct intel_engine_cs *engine)
+static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
 {
 struct i915_wa_list *w = &engine->whitelist;
@@ -1444,6 +1799,11 @@ static void cml_whitelist_build(struct intel_engine_cs *engine)
 whitelist_reg_ext(w,
 RING_CTX_TIMESTAMP(engine->mmio_base),
 RING_FORCE_TO_NONPRIV_ACCESS_RD);
+}
+
+static void cml_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
 cfl_whitelist_build(engine);
 }
@@ -1452,6 +1812,8 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
 {
 struct i915_wa_list *w = &engine->whitelist;
+ allow_read_ctx_timestamp(engine);
+
 switch (engine->class) {
 case RENDER_CLASS:
 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
@@ -1487,15 +1849,9 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
 /* hucStatus2RegOffset */
 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
 RING_FORCE_TO_NONPRIV_ACCESS_RD);
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
 break;
 default:
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
 break;
 }
 }
@@ -1504,6 +1860,8 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
 {
 struct i915_wa_list *w = &engine->whitelist;
+ allow_read_ctx_timestamp(engine);
+
 switch (engine->class) {
 case RENDER_CLASS:
 /*
@@ -1520,16 +1878,17 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
 RING_FORCE_TO_NONPRIV_ACCESS_RD |
 RING_FORCE_TO_NONPRIV_RANGE_4);
- /* Wa_1808121037:tgl */
+ /*
+ * Wa_1808121037:tgl
+ * Wa_14012131227:dg1
+ * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
+ */
 whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
 /* Wa_1806527549:tgl */
 whitelist_reg(w, HIZ_CHICKEN);
 break;
 default:
- whitelist_reg_ext(w,
- RING_CTX_TIMESTAMP(engine->mmio_base),
- RING_FORCE_TO_NONPRIV_ACCESS_RD);
 break;
 }
 }
@@ -1541,13 +1900,46 @@ static void dg1_whitelist_build(struct intel_engine_cs *engine)
 tgl_whitelist_build(engine);
 /* GEN:BUG:1409280441:dg1 */
- if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) &&
+ if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) &&
 (engine->class == RENDER_CLASS ||
 engine->class == COPY_ENGINE_CLASS))
 whitelist_reg_ext(w, RING_ID(engine->mmio_base),
 RING_FORCE_TO_NONPRIV_ACCESS_RD);
 }
+static void xehpsdv_whitelist_build(struct intel_engine_cs *engine)
+{
+ allow_read_ctx_timestamp(engine);
+}
+
+static void dg2_whitelist_build(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *w = &engine->whitelist;
+
+ allow_read_ctx_timestamp(engine);
+
+ switch (engine->class) {
+ case RENDER_CLASS:
+ /*
+ * Wa_1507100340:dg2_g10
+ *
+ * This covers 4 registers which are next to one another:
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ break;
+ default:
+ break;
+ }
+}
+
 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 {
 struct drm_i915_private *i915 = engine->i915;
@@ -1555,7 +1947,11 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
 wa_init_start(w, "whitelist", engine->name);
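 /*
  * For reference, a whitelist entry simply programs one of the engine's
  * RING_FORCE_TO_NONPRIV slots so that userspace may access an otherwise
  * privileged register. A minimal sketch (the register offset here is
  * hypothetical; the access flags are the real ones used elsewhere in
  * this file):
  *
  *	whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
  *			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
  *			  RING_FORCE_TO_NONPRIV_RANGE_4);
  */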
- if (IS_DG1(i915))
+ if (IS_DG2(i915))
+ dg2_whitelist_build(engine);
+ else if (IS_XEHPSDV(i915))
+ xehpsdv_whitelist_build(engine);
+ else if (IS_DG1(i915))
 dg1_whitelist_build(engine);
 else if (GRAPHICS_VER(i915) == 12)
 tgl_whitelist_build(engine);
@@ -1604,13 +2000,144 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
 i915_mmio_reg_offset(RING_NOPID(base)));
 }
+/*
+ * engine_fake_wa_init() is a placeholder to program registers that are
+ * not part of an official workaround defined by the hardware team.
+ * Adding the programming of those registers to the workaround list lets
+ * us use the wa framework for proper application and verification.
+ */
+static void
+engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ u8 mocs;
+
+ /*
+ * RING_CMD_CCTL needs to be programmed to un-cached
+ * for memory writes and reads output by the Command
+ * Streamers on Gen12 onward platforms.
+ */
+ if (GRAPHICS_VER(engine->i915) >= 12) {
+ mocs = engine->gt->mocs.uc_index;
+ wa_masked_field_set(wal,
+ RING_CMD_CCTL(engine->mmio_base),
+ CMD_CCTL_MOCS_MASK,
+ CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
+ }
+}
+
+static bool needs_wa_1308578152(struct intel_engine_cs *engine)
+{
+ u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0);
+
+ return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0;
+}
+
 static void
 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
 struct drm_i915_private *i915 = engine->i915;
- if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
- IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14013392000:dg2_g11 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+
+ /* Wa_16011620976:dg2_g11 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
+     IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012419201:dg2 */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4,
+ GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) ||
+     IS_DG2_G11(engine->i915)) {
+ /*
+ * Wa_22012826095:dg2
+ * Wa_22013059131:dg2
+ */
+ wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
+
+ /* Wa_22013059131:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0,
+ FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+ }
+
+ /* Wa_1308578152:dg2_g10 when first gslice is fused off */
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) &&
+     needs_wa_1308578152(engine)) {
+ wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
+ GEN12_REPLAY_MODE_GRANULARITY);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
+     IS_DG2_G11(engine->i915)) {
+ /* Wa_22013037850:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW);
+
+ /* Wa_22012856258:dg2 */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_DISABLE_READ_SUPPRESSION);
+
+ /*
+ * Wa_22010960976:dg2
+ * Wa_14013347512:dg2
+ */
+ wa_masked_dis(wal, GEN12_HDC_CHICKEN0,
+ LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_1608949956:dg2_g10
+ * Wa_14010198302:dg2_g10
+ */
+ wa_masked_en(wal, GEN8_ROW_CHICKEN,
+ MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
+
+ /*
+ * Wa_14010918519:dg2_g10
+ *
+ * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
+ * so 
ignoring verification. + */ + wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0, + FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE, + 0, false); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_22010430635:dg2 */ + wa_masked_en(wal, + GEN9_ROW_CHICKEN4, + GEN12_DISABLE_GRF_CLEAR); + + /* Wa_14010648519:dg2 */ + wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_G11(engine->i915)) { + /* Wa_22012654132:dg2 */ + wa_add(wal, GEN10_CACHE_MODE_SS, 0, + _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC), + 0 /* write-only, so skip validation */, + true); + } + + /* Wa_14013202645:dg2 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) + wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY); + + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || + IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1607138336:tgl[a0],dg1[a0] * Wa_1607063988:tgl[a0],dg1[a0] @@ -1620,7 +2147,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); } - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) { + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1606679103:tgl * (see also Wa_1606682166:icl) @@ -1655,7 +2182,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) } if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || - IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, @@ -1668,8 +2195,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); } - - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* * Wa_1607030317:tgl @@ -1752,15 +2278,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); - /* Wa_1407352427:icl,ehl */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - PSDUNIT_CLKGATE_DIS); - - /* Wa_1406680159:icl,ehl */ - wa_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE, - GWUNIT_CLKGATE_DIS); - /* * Wa_1408767742:icl[a2..forever],ehl[all] * Wa_1605460711:icl[a0..c0] @@ -2031,7 +2548,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) struct drm_i915_private *i915 = engine->i915; /* WaKBLVECSSemaphoreWaitPoll:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) { + if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) { wa_write(wal, RING_SEMA_WAIT_POLL(engine->mmio_base), 1); @@ -2044,6 +2561,8 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4)) return; + engine_fake_wa_init(engine, wal); + if (engine->class == RENDER_CLASS) rcs_engine_wa_init(engine, wal); else @@ -2067,12 +2586,7 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine) wa_list_apply(engine->gt, &engine->wa_list); } -struct mcr_range { - u32 start; - u32 end; -}; - -static const struct mcr_range mcr_ranges_gen8[] = { +static const struct i915_range mcr_ranges_gen8[] = { { .start = 0x5500, .end = 0x55ff }, { .start = 0x7000, .end = 0x7fff }, { .start = 0x9400, .end = 0x97ff }, @@ 
-2081,7 +2595,7 @@ static const struct mcr_range mcr_ranges_gen8[] = { {}, }; -static const struct mcr_range mcr_ranges_gen12[] = { +static const struct i915_range mcr_ranges_gen12[] = { { .start = 0x8150, .end = 0x815f }, { .start = 0x9520, .end = 0x955f }, { .start = 0xb100, .end = 0xb3ff }, @@ -2090,7 +2604,7 @@ static const struct mcr_range mcr_ranges_gen12[] = { {}, }; -static const struct mcr_range mcr_ranges_xehp[] = { +static const struct i915_range mcr_ranges_xehp[] = { { .start = 0x4000, .end = 0x4aff }, { .start = 0x5200, .end = 0x52ff }, { .start = 0x5400, .end = 0x7fff }, @@ -2109,7 +2623,7 @@ static const struct mcr_range mcr_ranges_xehp[] = { static bool mcr_range(struct drm_i915_private *i915, u32 offset) { - const struct mcr_range *mcr_ranges; + const struct i915_range *mcr_ranges; int i; if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h index 15abb68b6c00..9beaab77c7f0 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.h +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h @@ -24,7 +24,7 @@ static inline void intel_wa_list_free(struct i915_wa_list *wal) void intel_engine_init_ctx_wa(struct intel_engine_cs *engine); int intel_engine_emit_ctx_wa(struct i915_request *rq); -void intel_gt_init_workarounds(struct drm_i915_private *i915); +void intel_gt_init_workarounds(struct intel_gt *gt); void intel_gt_apply_workarounds(struct intel_gt *gt); bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from); diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 2c1af030310c..bb99fc03f503 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -35,9 +35,31 @@ static void mock_timeline_unpin(struct intel_timeline *tl) atomic_dec(&tl->pin_count); } +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) +{ + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} + static struct intel_ring *mock_ring(struct intel_engine_cs *engine) { - const unsigned long sz = PAGE_SIZE / 2; + const unsigned long sz = PAGE_SIZE; struct intel_ring *ring; ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); @@ -50,15 +72,11 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->vaddr = (void *)(ring + 1); atomic_set(&ring->pin_count, 1); - ring->vma = i915_vma_alloc(); - if (!ring->vma) { + ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE); + if (IS_ERR(ring->vma)) { kfree(ring); return NULL; } - i915_active_init(&ring->vma->active, NULL, NULL, 0); - __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma)); - __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags); - ring->vma->node.size = sz; intel_ring_update_space(ring); @@ -67,8 +85,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) static void mock_ring_free(struct intel_ring *ring) { - i915_active_fini(&ring->vma->active); - i915_vma_free(ring->vma); + i915_vma_put(ring->vma); kfree(ring); } @@ -125,6 +142,7 @@ static void mock_context_unpin(struct intel_context *ce) static void mock_context_post_unpin(struct intel_context *ce) { + 
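+ /*
+  * Pairs with the i915_vma_pin_ww() call in mock_context_pre_pin()
+  * below: the mock ring vma is now backed by a real object, so it has
+  * to be unpinned whenever the context is unpinned.
+  */
+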
i915_vma_unpin(ce->ring->vma); } static void mock_context_destroy(struct kref *ref) @@ -169,7 +187,7 @@ static int mock_context_alloc(struct intel_context *ce) static int mock_context_pre_pin(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **unused) { - return 0; + return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH); } static int mock_context_pin(struct intel_context *ce, void *unused) @@ -376,6 +394,8 @@ int mock_engine_init(struct intel_engine_cs *engine) { struct intel_context *ce; + INIT_LIST_HEAD(&engine->pinned_contexts_list); + engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK); if (!engine->sched_engine) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index 317eebf086c3..6e6e4d747cca 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -290,7 +290,7 @@ static int live_heartbeat_fast(void *arg) int err = 0; /* Check that the heartbeat ticks at the desired rate. */ - if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) return 0; for_each_engine(engine, gt, id) { @@ -352,7 +352,7 @@ static int live_heartbeat_off(void *arg) int err = 0; /* Check that we can turn off heartbeat and not interrupt VIP */ - if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) return 0; for_each_engine(engine, gt, id) { diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 75569666105d..75f6efc9882f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg) return 0; } +static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness) +{ + ktime_t start, unused, dt; + + if (!intel_engine_uses_guc(engine)) + return 0; + + /* + * In GuC mode of submission, the busyness stats may get updated after + * the batch starts running. Poll for a change in busyness and timeout + * after 500 us. 
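+ * (The delta computed below is a ktime_t in nanoseconds, so the
+ * 500000 bound in the loop corresponds to this 500 us timeout.)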
+ */ + start = ktime_get(); + while (intel_engine_get_busy_time(engine, &unused) == busyness) { + dt = ktime_get() - start; + if (dt > 500000) { + pr_err("active wait timed out %lld\n", dt); + ENGINE_TRACE(engine, "active wait time out %lld\n", dt); + return -ETIME; + } + } + + return 0; +} + static int live_engine_busy_stats(void *arg) { struct intel_gt *gt = arg; @@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg) GEM_BUG_ON(intel_gt_pm_is_awake(gt)); for_each_engine(engine, gt, id) { struct i915_request *rq; + ktime_t busyness, dummy; ktime_t de, dt; ktime_t t[2]; @@ -274,16 +300,23 @@ static int live_engine_busy_stats(void *arg) } i915_request_add(rq); + busyness = intel_engine_get_busy_time(engine, &dummy); if (!igt_wait_for_spinner(&spin, rq)) { intel_gt_set_wedged(engine->gt); err = -ETIME; goto end; } + err = __spin_until_busier(engine, busyness); + if (err) { + GEM_TRACE_DUMP(); + goto end; + } + ENGINE_TRACE(engine, "measuring busy time\n"); preempt_disable(); de = intel_engine_get_busy_time(engine, &t[0]); - udelay(100); + mdelay(10); de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de); preempt_enable(); dt = ktime_sub(t[1], t[0]); diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c index f12ffe797639..b367ecfa42de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -992,7 +992,7 @@ static int live_timeslice_preempt(void *arg) * need to preempt the current task and replace it with another * ready task. */ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + if (!CONFIG_DRM_I915_TIMESLICE_DURATION) return 0; obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); @@ -1122,7 +1122,7 @@ static int live_timeslice_rewind(void *arg) * but only a few of those requests, forcing us to rewind the * RING_TAIL of the original request. */ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + if (!CONFIG_DRM_I915_TIMESLICE_DURATION) return 0; for_each_engine(engine, gt, id) { @@ -1299,7 +1299,7 @@ static int live_timeslice_queue(void *arg) * ELSP[1] is already occupied, so must rely on timeslicing to * eject ELSP[0] in favour of the queue.) */ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + if (!CONFIG_DRM_I915_TIMESLICE_DURATION) return 0; obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); @@ -1420,7 +1420,7 @@ static int live_timeslice_nopreempt(void *arg) * We should not timeslice into a request that is marked with * I915_REQUEST_NOPREEMPT. */ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + if (!CONFIG_DRM_I915_TIMESLICE_DURATION) return 0; if (igt_spinner_init(&spin, gt)) @@ -2260,7 +2260,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg) int err; /* Preempt cancel non-preemptible spinner in ELSP0 */ - if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) return 0; if (!intel_has_reset_engine(arg->engine->gt)) @@ -2316,7 +2316,7 @@ static int __cancel_fail(struct live_preempt_cancel *arg) struct i915_request *rq; int err; - if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) return 0; if (!intel_has_reset_engine(engine->gt)) @@ -3375,7 +3375,7 @@ static int live_preempt_timeout(void *arg) * Check that we force preemption to occur by cancelling the previous * context if it refuses to yield the GPU. 
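 * (CONFIG_DRM_I915_PREEMPT_TIMEOUT is an integer Kconfig value, the
 * default preemption timeout in milliseconds, so the plain truthiness
 * check below suffices: a value of 0 means the timeout is compiled out
 * and this test is skipped.)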
*/ - if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) return 0; if (!intel_has_reset_engine(gt)) @@ -3493,7 +3493,7 @@ static int smoke_submit(struct preempt_smoke *smoke, if (batch) { struct i915_address_space *vm; - vm = i915_gem_context_get_vm_rcu(ctx); + vm = i915_gem_context_get_eb_vm(ctx); vma = i915_vma_instance(batch, vm, NULL); i915_vm_put(vm); if (IS_ERR(vma)) @@ -3733,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt, GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); for (n = 0; n < nctx; n++) { - ve[n] = intel_engine_create_virtual(siblings, nsibling); + ve[n] = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve[n])) { err = PTR_ERR(ve[n]); nctx = n; @@ -3929,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt, * restrict it to our desired engine within the virtual engine. */ - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_close; @@ -4060,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt, i915_request_add(rq); } - ce = intel_engine_create_virtual(siblings, nsibling); + ce = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -4112,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt, /* XXX We do not handle oversubscription and fairness with normal rq */ for (n = 0; n < nsibling; n++) { - ce = intel_engine_create_virtual(siblings, nsibling); + ce = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -4214,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt, if (err) goto out_scratch; - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_scratch; @@ -4354,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt, if (igt_spinner_init(&spin, gt)) return -ENOMEM; - ve = intel_engine_create_virtual(siblings, nsibling); + ve = intel_engine_create_virtual(siblings, nsibling, 0); if (IS_ERR(ve)) { err = PTR_ERR(ve); goto out_spin; diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index b9441217ca3d..55c5cdb99f45 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -43,7 +43,7 @@ static void measure_clocks(struct intel_engine_cs *engine, int i; for (i = 0; i < 5; i++) { - preempt_disable(); + local_irq_disable(); cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP); dt[i] = ktime_get(); @@ -51,7 +51,7 @@ static void measure_clocks(struct intel_engine_cs *engine, dt[i] = ktime_sub(ktime_get(), dt[i]); cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP); - preempt_enable(); + local_irq_enable(); } /* Use the median of both cycle/dt; close enough */ diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 2c1ed32ca5ac..e5ad4d5a91c0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -117,7 +117,7 @@ static struct i915_request * hang_create_request(struct hang *h, struct intel_engine_cs *engine) { struct intel_gt *gt = h->gt; - struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx); + struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx); struct drm_i915_gem_object *obj; struct i915_request *rq = NULL; struct 
i915_vma *hws, *vma;
@@ -471,7 +471,8 @@ static int igt_reset_nop_engine(void *arg)
 count = 0;
 st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
 do {
 int i;
@@ -528,7 +529,7 @@ static int igt_reset_nop_engine(void *arg)
 break;
 }
 } while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 st_engine_heartbeat_enable(engine);
 pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -582,7 +583,8 @@ static int igt_reset_fail_engine(void *arg)
 }
 st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
 force_reset_timeout(engine);
 err = intel_engine_reset(engine, NULL);
@@ -679,7 +681,7 @@ static int igt_reset_fail_engine(void *arg)
 out:
 pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
 skip:
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 st_engine_heartbeat_enable(engine);
 intel_context_put(ce);
@@ -734,7 +736,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 reset_engine_count = i915_reset_engine_count(global, engine);
 st_engine_heartbeat_disable(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
 count = 0;
 do {
 struct i915_request *rq = NULL;
@@ -789,7 +792,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 if (err)
 pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
 engine->name, rq->fence.context,
- rq->fence.seqno, rq->context->guc_id, err);
+ rq->fence.seqno, rq->context->guc_id.id, err);
 }
 skip:
@@ -824,7 +827,7 @@ restore:
 if (err)
 break;
 } while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 st_engine_heartbeat_enable(engine);
 pr_info("%s: Completed %lu %s resets\n",
 engine->name, count, active ? "active" : "idle");
@@ -1042,7 +1045,8 @@ static int __igt_reset_engines(struct intel_gt *gt,
 yield(); /* start all threads before we begin */
 st_engine_heartbeat_disable_no_pm(engine);
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags));
 do {
 struct i915_request *rq = NULL;
 struct intel_selftest_saved_policy saved;
@@ -1098,7 +1102,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 if (err)
 pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
 engine->name, rq->fence.context,
- rq->fence.seqno, rq->context->guc_id, err);
+ rq->fence.seqno, rq->context->guc_id.id, err);
 }
 count++;
@@ -1108,7 +1112,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
 engine->name, test_name,
 rq->fence.context,
- rq->fence.seqno, rq->context->guc_id);
+ rq->fence.seqno, rq->context->guc_id.id);
 i915_request_put(rq);
 GEM_TRACE_DUMP();
@@ -1165,7 +1169,7 @@ restore:
 if (err)
 break;
 } while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
 st_engine_heartbeat_enable_no_pm(engine);
 pr_info("i915_reset_engine(%s:%s): %lu resets\n",
@@ -1596,7 +1600,7 @@ static int igt_reset_evict_ppgtt(void *arg)
 if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
 return 0;
- ppgtt = i915_ppgtt_create(gt);
+ ppgtt = i915_ppgtt_create(gt, 0);
 if (IS_ERR(ppgtt))
 return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 12ef2837c89b..e21787301bbd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -49,6 +49,7 @@ static int copy(struct intel_migrate *migrate,
 if (IS_ERR(src))
 return 0;
+ sz = src->base.size;
 dst = i915_gem_object_create_internal(i915, sz);
 if (IS_ERR(dst))
 goto err_free_src;
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index e623ac45f4aa..962e91ba3be4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -66,7 +66,7 @@ reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
 memset(lists, 0, sizeof(*lists));
 wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
- gt_init_workarounds(gt->i915, &lists->gt_wa_list);
+ gt_init_workarounds(gt, &lists->gt_wa_list);
 wa_init_finish(&lists->gt_wa_list);
 for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 8ff582222aff..fe5d7d261797 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -142,7 +142,9 @@ enum intel_guc_action {
 INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
 INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
 INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
+ INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
 INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
+ INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
 INTEL_GUC_ACTION_LIMIT
 };
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index 99e1fad5ca20..c9086a600bce 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -102,11 +102,11 @@
static_assert(sizeof(struct guc_ct_buffer_desc) == 64); * | +-------+--------------------------------------------------------------+ * | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message | * +---+-------+--------------------------------------------------------------+ - * | 1 | 31:0 | +--------------------------------------------------------+ | - * +---+-------+ | | | - * |...| | | Embedded `HXG Message`_ | | - * +---+-------+ | | | - * | n | 31:0 | +--------------------------------------------------------+ | + * | 1 | 31:0 | | + * +---+-------+ | + * |...| | [Embedded `HXG Message`_] | + * +---+-------+ | + * | n | 31:0 | | * +---+-------+--------------------------------------------------------------+ */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h index bbf1ddb77434..9baa3cb07d13 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h @@ -38,11 +38,11 @@ * +---+-------+--------------------------------------------------------------+ * | | Bits | Description | * +===+=======+==============================================================+ - * | 0 | 31:0 | +--------------------------------------------------------+ | - * +---+-------+ | | | - * |...| | | Embedded `HXG Message`_ | | - * +---+-------+ | | | - * | n | 31:0 | +--------------------------------------------------------+ | + * | 0 | 31:0 | | + * +---+-------+ | + * |...| | [Embedded `HXG Message`_] | + * +---+-------+ | + * | n | 31:0 | | * +---+-------+--------------------------------------------------------------+ */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index fbfcae727d7f..6e228343e8cb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -3,6 +3,7 @@ * Copyright © 2014-2019 Intel Corporation */ +#include "gem/i915_gem_lmem.h" #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" @@ -647,7 +648,14 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) u64 flags; int ret; - obj = i915_gem_object_create_shmem(gt->i915, size); + if (HAS_LMEM(gt->i915)) + obj = i915_gem_object_create_lmem(gt->i915, size, + I915_BO_ALLOC_CPU_CLEAR | + I915_BO_ALLOC_CONTIGUOUS | + I915_BO_ALLOC_PM_EARLY); + else + obj = i915_gem_object_create_shmem(gt->i915, size); + if (IS_ERR(obj)) return ERR_CAST(obj); @@ -748,3 +756,32 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p) } } } + +void intel_guc_write_barrier(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + if (i915_gem_object_is_lmem(guc->ct.vma->obj)) { + /* + * Ensure intel_uncore_write_fw can be used rather than + * intel_uncore_write. + */ + GEM_BUG_ON(guc->send_regs.fw_domains); + + /* + * This register is used by the i915 and GuC for MMIO based + * communication. Once we are in this code CTBs are the only + * method the i915 uses to communicate with the GuC so it is + * safe to write to this register (a value of 0 is NOP for MMIO + * communication). If we ever start mixing CTBs and MMIOs a new + * register will have to be chosen. This function is also used + * to enforce ordering of a work queue item write and an update + * to the process descriptor. When a work queue is being used, + * CTBs are also the only mechanism of communication. 
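+ * In short: when the buffer lives in local memory, the dummy MMIO
+ * write below doubles as the flush of the preceding lmem writes; in
+ * system memory a plain wmb() is enough. An illustrative caller
+ * ordering a work queue item write (names here are illustrative):
+ *
+ *	wqi[0] = header;		/* publish the item first */
+ *	intel_guc_write_barrier(guc);	/* flush before the tail bump */
+ *	WRITE_ONCE(desc->tail, new_tail);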
+ */ + intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0); + } else { + /* wmb() sufficient for a barrier if in smem */ + wmb(); + } +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 2e27fe59786b..1cb46098030d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -22,75 +22,186 @@ struct __guc_ads_blob; -/* - * Top level structure of GuC. It handles firmware loading and manages client - * pool. intel_guc owns a intel_guc_client to replace the legacy ExecList - * submission. +/** + * struct intel_guc - Top level structure of GuC. + * + * It handles firmware loading and manages client pool. intel_guc owns an + * i915_sched_engine for submission. */ struct intel_guc { + /** @fw: the GuC firmware */ struct intel_uc_fw fw; + /** @log: sub-structure containing GuC log related data and objects */ struct intel_guc_log log; + /** @ct: the command transport communication channel */ struct intel_guc_ct ct; + /** @slpc: sub-structure containing SLPC related data and objects */ struct intel_guc_slpc slpc; - /* Global engine used to submit requests to GuC */ + /** @sched_engine: Global engine used to submit requests to GuC */ struct i915_sched_engine *sched_engine; + /** + * @stalled_request: if GuC can't process a request for any reason, we + * save it until GuC restarts processing. No other request can be + * submitted until the stalled request is processed. + */ struct i915_request *stalled_request; + /** + * @submission_stall_reason: reason why submission is stalled + */ + enum { + STALL_NONE, + STALL_REGISTER_CONTEXT, + STALL_MOVE_LRC_TAIL, + STALL_ADD_REQUEST, + } submission_stall_reason; /* intel_guc_recv interrupt related state */ + /** @irq_lock: protects GuC irq state */ spinlock_t irq_lock; + /** + * @msg_enabled_mask: mask of events that are processed when receiving + * an INTEL_GUC_ACTION_DEFAULT G2H message. + */ unsigned int msg_enabled_mask; + /** + * @outstanding_submission_g2h: number of outstanding GuC to Host + * responses related to GuC submission, used to determine if the GT is + * idle + */ atomic_t outstanding_submission_g2h; + /** @interrupts: pointers to GuC interrupt-managing functions. 
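+ * These are typically invoked as, e.g., guc->interrupts.enable(guc),
+ * letting each platform generation plug in its own implementation.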
*/
 struct {
 void (*reset)(struct intel_guc *guc);
 void (*enable)(struct intel_guc *guc);
 void (*disable)(struct intel_guc *guc);
 } interrupts;
- /*
- * contexts_lock protects the pool of free guc ids and a linked list of
- * guc ids available to be stolen
+ /**
+ * @submission_state: sub-structure for submission state protected by
+ * a single lock
+ */
+ struct {
+ /**
+ * @lock: protects everything in submission_state,
+ * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
+ * out of zero
+ */
+ spinlock_t lock;
+ /**
+ * @guc_ids: used to allocate new guc_ids, single-lrc
+ */
+ struct ida guc_ids;
+ /**
+ * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+ */
+ unsigned long *guc_ids_bitmap;
+ /**
+ * @guc_id_list: list of intel_context with valid guc_ids but no
+ * refs
+ */
+ struct list_head guc_id_list;
+ /**
+ * @destroyed_contexts: list of contexts waiting to be destroyed
+ * (deregistered with the GuC)
+ */
+ struct list_head destroyed_contexts;
+ /**
+ * @destroyed_worker: worker to deregister contexts; needed
+ * because we must take a GT PM reference and cannot do that
+ * from the destroy function, which may run in atomic context
+ * (no sleeping)
+ */
+ struct work_struct destroyed_worker;
+ } submission_state;
+
+ /**
+ * @submission_supported: tracks whether we support GuC submission on
+ * the current platform
+ */
- spinlock_t contexts_lock;
- struct ida guc_ids;
- struct list_head guc_id_list;
-
 bool submission_supported;
+ /** @submission_selected: tracks whether the user enabled GuC submission */
 bool submission_selected;
+ /**
+ * @rc_supported: tracks whether we support GuC rc on the current platform
+ */
 bool rc_supported;
+ /** @rc_selected: tracks whether the user enabled GuC rc */
 bool rc_selected;
+ /** @ads_vma: object allocated to hold the GuC ADS */
 struct i915_vma *ads_vma;
+ /** @ads_blob: contents of the GuC ADS */
 struct __guc_ads_blob *ads_blob;
+ /** @ads_regset_size: size of the save/restore regsets in the ADS */
 u32 ads_regset_size;
+ /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
 u32 ads_golden_ctxt_size;
+ /** @ads_engine_usage_size: size of engine usage in the ADS */
+ u32 ads_engine_usage_size;
+ /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
 struct i915_vma *lrc_desc_pool;
+ /** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
 void *lrc_desc_pool_vaddr;
- /* guc_id to intel_context lookup */
+ /**
+ * @context_lookup: used to resolve intel_context from guc_id, if a
+ * context is present in this structure it is registered with the GuC
+ */
 struct xarray context_lookup;
- /* Control params for fw initialization */
+ /** @params: Control params for fw initialization */
 u32 params[GUC_CTL_MAX_DWORDS];
- /* GuC's FW specific registers used in MMIO send */
+ /** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
 struct {
 u32 base;
 unsigned int count;
 enum forcewake_domains fw_domains;
 } send_regs;
- /* register used to send interrupts to the GuC FW */
+ /** @notify_reg: register used to send interrupts to the GuC FW */
 i915_reg_t notify_reg;
- /* Store msg (e.g. log flush) that we see while CTBs are disabled */
+ /**
+ * @mmio_msg: notification bitmask that the GuC writes in one of its
+ * registers when the CT channel is disabled, to be processed when the
+ * channel is back up.
+ */ u32 mmio_msg; - /* To serialize the intel_guc_send actions */ + /** @send_mutex: used to serialize the intel_guc_send actions */ struct mutex send_mutex; + + /** + * @timestamp: GT timestamp object that stores a copy of the timestamp + * and adjusts it for overflow using a worker. + */ + struct { + /** + * @lock: Lock protecting the below fields and the engine stats. + */ + spinlock_t lock; + + /** + * @gt_stamp: 64 bit extended value of the GT timestamp. + */ + u64 gt_stamp; + + /** + * @ping_delay: Period for polling the GT timestamp for + * overflow. + */ + unsigned long ping_delay; + + /** + * @work: Periodic work to adjust GT timestamp, engine and + * context usage for overflows. + */ + struct delayed_work work; + } timestamp; }; static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) @@ -295,4 +406,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc); void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p); +void intel_guc_write_barrier(struct intel_guc *guc); + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 6926919bcac6..1a1edae67e4e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -26,6 +26,8 @@ * | guc_policies | * +---------------------------------------+ * | guc_gt_system_info | + * +---------------------------------------+ + * | guc_engine_usage | * +---------------------------------------+ <== static * | guc_mmio_reg[countA] (engine 0.0) | * | guc_mmio_reg[countB] (engine 0.1) | @@ -47,6 +49,7 @@ struct __guc_ads_blob { struct guc_ads ads; struct guc_policies policies; struct guc_gt_system_info system_info; + struct guc_engine_usage engine_usage; /* From here on, location is dynamic! Refer to above diagram. */ struct guc_mmio_reg regset[0]; } __packed; @@ -176,7 +179,7 @@ static void guc_mapping_table_init(struct intel_gt *gt, for_each_engine(engine, gt, id) { u8 guc_class = engine_class_to_guc_class(engine->class); - system_info->mapping_table[guc_class][engine->instance] = + system_info->mapping_table[guc_class][ilog2(engine->logical_mask)] = engine->instance; } } @@ -349,6 +352,8 @@ static void fill_engine_enable_masks(struct intel_gt *gt, info->engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt); } +#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) +#define LRC_SKIP_SIZE (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE) static int guc_prep_golden_context(struct intel_guc *guc, struct __guc_ads_blob *blob) { @@ -396,7 +401,18 @@ static int guc_prep_golden_context(struct intel_guc *guc, if (!blob) continue; - blob->ads.eng_state_size[guc_class] = real_size; + /* + * This interface is slightly confusing. We need to pass the + * base address of the full golden context and the size of just + * the engine state, which is the section of the context image + * that starts after the execlists context. This is required to + * allow the GuC to restore just the engine state when a + * watchdog reset occurs. + * We calculate the engine state size by removing the size of + * what comes before it in the context image (which is identical + * on all engines). 
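+ * As a worked example with 4 KiB pages: LRC_SKIP_SIZE =
+ * LRC_PPHWSP_SZ * PAGE_SIZE + 80 * sizeof(u32), i.e. (assuming
+ * LRC_PPHWSP_SZ is a single page) 4096 + 320 = 4416 bytes removed
+ * from real_size.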
+ */
+ blob->ads.eng_state_size[guc_class] = real_size - LRC_SKIP_SIZE;
 blob->ads.golden_context_lrca[guc_class] = addr_ggtt;
 addr_ggtt += alloc_size;
 }
@@ -436,11 +452,6 @@ static void guc_init_golden_context(struct intel_guc *guc)
 u8 engine_class, guc_class;
 u8 *ptr;
- /* Skip execlist and PPGTT registers + HWSP */
- const u32 lr_hw_context_size = 80 * sizeof(u32);
- const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE +
- lr_hw_context_size;
-
 if (!intel_uc_uses_guc_submission(&gt->uc))
 return;
@@ -476,12 +487,12 @@ static void guc_init_golden_context(struct intel_guc *guc)
 continue;
 }
- GEM_BUG_ON(blob->ads.eng_state_size[guc_class] != real_size);
+ GEM_BUG_ON(blob->ads.eng_state_size[guc_class] !=
+ real_size - LRC_SKIP_SIZE);
 GEM_BUG_ON(blob->ads.golden_context_lrca[guc_class] != addr_ggtt);
 addr_ggtt += alloc_size;
- shmem_read(engine->default_state, skip_size, ptr + skip_size,
- real_size - skip_size);
+ shmem_read(engine->default_state, 0, ptr, real_size);
 ptr += alloc_size;
 }
@@ -620,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc)
 guc_ads_private_data_reset(guc);
 }
+
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
+{
+ struct __guc_ads_blob *blob = guc->ads_blob;
+ u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ u32 offset = base + ptr_offset(blob, engine_usage);
+
+ return offset;
+}
+
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine)
+{
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct __guc_ads_blob *blob = guc->ads_blob;
+ u8 guc_class = engine_class_to_guc_class(engine->class);
+
+ return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)];
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
index 3d85051d57e4..e74c110facff 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
@@ -6,8 +6,11 @@
 #ifndef _INTEL_GUC_ADS_H_
 #define _INTEL_GUC_ADS_H_
+#include <linux/types.h>
+
 struct intel_guc;
 struct drm_printer;
+struct intel_engine_cs;
 int intel_guc_ads_create(struct intel_guc *guc);
 void intel_guc_ads_destroy(struct intel_guc *guc);
@@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc);
 void intel_guc_ads_reset(struct intel_guc *guc);
 void intel_guc_ads_print_policy_info(struct intel_guc *guc, struct drm_printer *p);
+struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine);
+u32 intel_guc_engine_usage_offset(struct intel_guc *guc);
 #endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 22b4733b55e2..a0cc34be7b56 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -168,12 +168,15 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
 FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
 FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
 };
+ int ret;
 GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
 GEM_BUG_ON(size % SZ_4K);
 /* CT registration must go over MMIO */
- return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ return ret > 0 ? 
-EPROTO : ret; } static int ct_register_buffer(struct intel_guc_ct *ct, u32 type, @@ -188,8 +191,8 @@ static int ct_register_buffer(struct intel_guc_ct *ct, u32 type, err = guc_action_register_ct_buffer(ct_to_guc(ct), type, desc_addr, buff_addr, size); if (unlikely(err)) - CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n", - guc_ct_buffer_type_to_str(type), err); + CT_ERROR(ct, "Failed to register %s buffer (%pe)\n", + guc_ct_buffer_type_to_str(type), ERR_PTR(err)); return err; } @@ -201,11 +204,14 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type) FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB), FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type), }; + int ret; GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST); /* CT deregistration must go over MMIO */ - return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0); + ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0); + + return ret > 0 ? -EPROTO : ret; } static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type) @@ -213,8 +219,8 @@ static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type) int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type); if (unlikely(err)) - CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n", - guc_ct_buffer_type_to_str(type), err); + CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n", + guc_ct_buffer_type_to_str(type), ERR_PTR(err)); return err; } @@ -377,28 +383,6 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct) return ++ct->requests.last_fence; } -static void write_barrier(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_gt *gt = guc_to_gt(guc); - - if (i915_gem_object_is_lmem(guc->ct.vma->obj)) { - GEM_BUG_ON(guc->send_regs.fw_domains); - /* - * This register is used by the i915 and GuC for MMIO based - * communication. Once we are in this code CTBs are the only - * method the i915 uses to communicate with the GuC so it is - * safe to write to this register (a value of 0 is NOP for MMIO - * communication). If we ever start mixing CTBs and MMIOs a new - * register will have to be chosen. 
- */ - intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0); - } else { - /* wmb() sufficient for a barrier if in smem */ - wmb(); - } -} - static int ct_write(struct intel_guc_ct *ct, const u32 *action, u32 len /* in dwords */, @@ -468,7 +452,7 @@ static int ct_write(struct intel_guc_ct *ct, * make sure H2G buffer update and LRC tail update (if this triggering a * submission) are visible before updating the descriptor tail */ - write_barrier(ct); + intel_guc_write_barrier(ct_to_guc(ct)); /* update local copies */ ctb->tail = tail; @@ -522,9 +506,6 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status) err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS); #undef done - if (unlikely(err)) - DRM_ERROR("CT: fence %u err %d\n", req->fence, err); - *status = req->status; return err; } @@ -722,8 +703,11 @@ retry: err = wait_for_ct_request_update(&request, status); g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); - if (unlikely(err)) + if (unlikely(err)) { + CT_ERROR(ct, "No response for request %#x (fence %u)\n", + action[0], request.fence); goto unlink; + } if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) { err = -EIO; @@ -775,8 +759,8 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, ret = ct_send(ct, action, len, response_buf, response_buf_size, &status); if (unlikely(ret < 0)) { - CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n", - action[0], ret, status); + CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n", + action[0], ERR_PTR(ret), status); } else if (unlikely(ret)) { CT_DEBUG(ct, "send action %#x returned %d (%#x)\n", action[0], ret, ret); @@ -1042,9 +1026,9 @@ static void ct_incoming_request_worker_func(struct work_struct *w) container_of(w, struct intel_guc_ct, requests.worker); bool done; - done = ct_process_incoming_requests(ct); - if (!done) - queue_work(system_unbound_wq, &ct->requests.worker); + do { + done = ct_process_incoming_requests(ct); + } while (!done); } static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c index 887c8c8f35db..25f09a420561 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c @@ -5,14 +5,14 @@ #include <drm/drm_print.h> -#include "gt/debugfs_gt.h" +#include "gt/intel_gt_debugfs.h" +#include "gt/uc/intel_guc_ads.h" +#include "gt/uc/intel_guc_ct.h" +#include "gt/uc/intel_guc_slpc.h" +#include "gt/uc/intel_guc_submission.h" #include "intel_guc.h" #include "intel_guc_debugfs.h" #include "intel_guc_log_debugfs.h" -#include "gt/uc/intel_guc_ct.h" -#include "gt/uc/intel_guc_ads.h" -#include "gt/uc/intel_guc_submission.h" -#include "gt/uc/intel_guc_slpc.h" static int guc_info_show(struct seq_file *m, void *data) { @@ -35,7 +35,7 @@ static int guc_info_show(struct seq_file *m, void *data) return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_info); static int guc_registered_contexts_show(struct seq_file *m, void *data) { @@ -49,7 +49,7 @@ static int guc_registered_contexts_show(struct seq_file *m, void *data) return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts); static int guc_slpc_info_show(struct seq_file *m, void *unused) { @@ -62,7 +62,7 @@ static int guc_slpc_info_show(struct seq_file *m, void *unused) return intel_guc_slpc_print_info(slpc, &p); 
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
 static bool intel_eval_slpc_support(void *data)
 {
@@ -73,7 +73,7 @@ void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
 {
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
 { "guc_info", &guc_info_fops, NULL },
 { "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
 { "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 76fe766ad1bc..196424be0998 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -41,18 +41,21 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
 }
 /* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
- struct intel_uncore *uncore)
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
 {
 u32 rsa[UOS_RSA_SCRATCH_COUNT];
 size_t copied;
 int i;
 copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
- GEM_BUG_ON(copied < sizeof(rsa));
+ if (copied < sizeof(rsa))
+ return -ENOMEM;
 for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
 intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
 }
 /*
@@ -141,7 +144,9 @@ int intel_guc_fw_upload(struct intel_guc *guc)
 * by the DMA engine in one operation, whereas the RSA signature is
 * loaded via MMIO.
 */
- guc_xfer_rsa(&guc->fw, uncore);
+ ret = guc_xfer_rsa(&guc->fw, uncore);
+ if (ret)
+ goto out;
 /*
 * Current uCode expects the code to be loaded at 8k; locations below
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index fa4be13c8854..7072e30e99f4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -52,27 +52,27 @@
 #define GUC_DOORBELL_INVALID 256
-#define GUC_WQ_SIZE (PAGE_SIZE * 2)
-
-/* Work queue item header definitions */
+/*
+ * Work queue item header definitions
+ *
+ * The work queue is a circular buffer used to submit complex (multi-lrc)
+ * submissions to the GuC. A work queue item is an entry in the circular
+ * buffer. 
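+ * An item header packs its fields with the masks below, e.g.
+ * (illustrative only; len_dw stands for the item length in dwords as
+ * specified by the GuC ABI):
+ *
+ *	u32 header = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
+ *		     FIELD_PREP(WQ_LEN_MASK, len_dw);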
+ */ #define WQ_STATUS_ACTIVE 1 #define WQ_STATUS_SUSPENDED 2 #define WQ_STATUS_CMD_ERROR 3 #define WQ_STATUS_ENGINE_ID_NOT_USED 4 #define WQ_STATUS_SUSPENDED_FROM_RESET 5 -#define WQ_TYPE_SHIFT 0 -#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT) -#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT) -#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT) -#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT) -#define WQ_TARGET_SHIFT 10 -#define WQ_LEN_SHIFT 16 -#define WQ_NO_WCFLUSH_WAIT (1 << 27) -#define WQ_PRESENT_WORKLOAD (1 << 28) - -#define WQ_RING_TAIL_SHIFT 20 -#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ -#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) +#define WQ_TYPE_BATCH_BUF 0x1 +#define WQ_TYPE_PSEUDO 0x2 +#define WQ_TYPE_INORDER 0x3 +#define WQ_TYPE_NOOP 0x4 +#define WQ_TYPE_MULTI_LRC 0x5 +#define WQ_TYPE_MASK GENMASK(7, 0) +#define WQ_LEN_MASK GENMASK(26, 16) + +#define WQ_GUC_ID_MASK GENMASK(15, 0) +#define WQ_RING_TAIL_MASK GENMASK(28, 18) #define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) #define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) @@ -186,7 +186,7 @@ struct guc_process_desc { u32 wq_status; u32 engine_presence; u32 priority; - u32 reserved[30]; + u32 reserved[36]; } __packed; #define CONTEXT_REGISTRATION_FLAG_KMD BIT(0) @@ -294,6 +294,19 @@ struct guc_ads { u32 reserved[15]; } __packed; +/* Engine usage stats */ +struct guc_engine_usage_record { + u32 current_context_index; + u32 last_switch_in_stamp; + u32 reserved0; + u32 total_runtime; + u32 reserved1[4]; +} __packed; + +struct guc_engine_usage { + struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; +} __packed; + /* GuC logging structures */ enum guc_log_buffer_type { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c index 64e0b86bf258..46026c2c1722 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c @@ -6,7 +6,7 @@ #include <linux/fs.h> #include <drm/drm_print.h> -#include "gt/debugfs_gt.h" +#include "gt/intel_gt_debugfs.h" #include "intel_guc.h" #include "intel_guc_log.h" #include "intel_guc_log_debugfs.h" @@ -17,7 +17,7 @@ static int guc_log_dump_show(struct seq_file *m, void *data) return intel_guc_log_dump(m->private, &p, false); } -DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_log_dump); static int guc_load_err_log_dump_show(struct seq_file *m, void *data) { @@ -25,7 +25,7 @@ static int guc_load_err_log_dump_show(struct seq_file *m, void *data) return intel_guc_log_dump(m->private, &p, true); } -DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump); static int guc_log_level_get(void *data, u64 *val) { @@ -109,7 +109,7 @@ static const struct file_operations guc_log_relay_fops = { void intel_guc_log_debugfs_register(struct intel_guc_log *log, struct dentry *root) { - static const struct debugfs_gt_file files[] = { + static const struct intel_gt_debugfs_file files[] = { { "guc_log_dump", &guc_log_dump_fops, NULL }, { "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL }, { "guc_log_level", &guc_log_level_fops, NULL }, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 65a3e7fdb2b2..22c1c12369f2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -79,29 +79,6 @@ static void slpc_mem_set_disabled(struct slpc_shared_data *data, 
slpc_mem_set_param(data, enable_id, 0); } -int intel_guc_slpc_init(struct intel_guc_slpc *slpc) -{ - struct intel_guc *guc = slpc_to_guc(slpc); - struct drm_i915_private *i915 = slpc_to_i915(slpc); - u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); - int err; - - GEM_BUG_ON(slpc->vma); - - err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); - if (unlikely(err)) { - drm_err(&i915->drm, - "Failed to allocate SLPC struct (err=%pe)\n", - ERR_PTR(err)); - return err; - } - - slpc->max_freq_softlimit = 0; - slpc->min_freq_softlimit = 0; - - return err; -} - static u32 slpc_get_state(struct intel_guc_slpc *slpc) { struct slpc_shared_data *data; @@ -203,6 +180,86 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, return guc_action_slpc_unset_param(guc, id); } +static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq) +{ + struct drm_i915_private *i915 = slpc_to_i915(slpc); + struct intel_guc *guc = slpc_to_guc(slpc); + intel_wakeref_t wakeref; + int ret = 0; + + lockdep_assert_held(&slpc->lock); + + if (!intel_guc_is_ready(guc)) + return -ENODEV; + + /* + * This function is a little different as compared to + * intel_guc_slpc_set_min_freq(). Softlimit will not be updated + * here since this is used to temporarily change min freq, + * for example, during a waitboost. Caller is responsible for + * checking bounds. + */ + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, + freq); + if (ret) + drm_err(&i915->drm, "Unable to force min freq to %u: %d", + freq, ret); + } + + return ret; +} + +static void slpc_boost_work(struct work_struct *work) +{ + struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work); + + /* + * Raise min freq to boost. It's possible that + * this is greater than current max. But it will + * certainly be limited by RP0. An error setting + * the min param is not fatal. 
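+ * The expected boost lifecycle: a waiter calls intel_guc_slpc_boost()
+ * (declared in the header below), which bumps num_waiters and queues
+ * this work; request retirement calls intel_guc_slpc_dec_waiters(),
+ * and the last waiter restores the min frequency softlimit.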
+ */ + mutex_lock(&slpc->lock); + if (atomic_read(&slpc->num_waiters)) { + slpc_force_min_freq(slpc, slpc->boost_freq); + slpc->num_boosts++; + } + mutex_unlock(&slpc->lock); +} + +int intel_guc_slpc_init(struct intel_guc_slpc *slpc) +{ + struct intel_guc *guc = slpc_to_guc(slpc); + struct drm_i915_private *i915 = slpc_to_i915(slpc); + u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + int err; + + GEM_BUG_ON(slpc->vma); + + err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); + if (unlikely(err)) { + drm_err(&i915->drm, + "Failed to allocate SLPC struct (err=%pe)\n", + ERR_PTR(err)); + return err; + } + + slpc->max_freq_softlimit = 0; + slpc->min_freq_softlimit = 0; + + slpc->boost_freq = 0; + atomic_set(&slpc->num_waiters, 0); + slpc->num_boosts = 0; + + mutex_init(&slpc->lock); + INIT_WORK(&slpc->boost_work, slpc_boost_work); + + return err; +} + static const char *slpc_global_state_to_string(enum slpc_global_state state) { switch (state) { @@ -393,7 +450,11 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) val > slpc->max_freq_softlimit) return -EINVAL; + /* Need a lock now since waitboost can be modifying min as well */ + mutex_lock(&slpc->lock); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, val); @@ -406,6 +467,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) if (!ret) slpc->min_freq_softlimit = val; + mutex_unlock(&slpc->lock); + return ret; } @@ -522,6 +585,9 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc) GT_FREQUENCY_MULTIPLIER; slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) * GT_FREQUENCY_MULTIPLIER; + + if (!slpc->boost_freq) + slpc->boost_freq = slpc->rp0_freq; } /* @@ -588,6 +654,47 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) return 0; } +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val) +{ + int ret = 0; + + if (val < slpc->min_freq || val > slpc->rp0_freq) + return -EINVAL; + + mutex_lock(&slpc->lock); + + if (slpc->boost_freq != val) { + /* Apply only if there are active waiters */ + if (atomic_read(&slpc->num_waiters)) { + ret = slpc_force_min_freq(slpc, val); + if (ret) { + ret = -EIO; + goto done; + } + } + + slpc->boost_freq = val; + } + +done: + mutex_unlock(&slpc->lock); + return ret; +} + +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc) +{ + /* + * Return min back to the softlimit. + * This is called during request retire, + * so we don't need to fail that if the + * set_param fails. 
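+	 *
+	 * atomic_dec_and_test() returns true only for the caller that
+	 * drops num_waiters to zero, so the min frequency is restored
+	 * exactly once, when the last boosting waiter goes away.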
+ */ + mutex_lock(&slpc->lock); + if (atomic_dec_and_test(&slpc->num_waiters)) + slpc_force_min_freq(slpc, slpc->min_freq_softlimit); + mutex_unlock(&slpc->lock); +} + int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p) { struct drm_i915_private *i915 = slpc_to_i915(slpc); @@ -611,6 +718,8 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p slpc_decode_max_freq(slpc)); drm_printf(p, "\tMin freq: %u MHz\n", slpc_decode_min_freq(slpc)); + drm_printf(p, "\twaitboosts: %u\n", + slpc->num_boosts); } } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h index e45054d5b9b4..0caa8fee3c04 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h @@ -34,9 +34,12 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc); void intel_guc_slpc_fini(struct intel_guc_slpc *slpc); int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val); +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p); void intel_guc_pm_intrmsk_enable(struct intel_gt *gt); +void intel_guc_slpc_boost(struct intel_guc_slpc *slpc); +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h index 41d13527666f..bf5b9a563c09 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h @@ -6,6 +6,9 @@ #ifndef _INTEL_GUC_SLPC_TYPES_H_ #define _INTEL_GUC_SLPC_TYPES_H_ +#include <linux/atomic.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> #include <linux/types.h> #define SLPC_RESET_TIMEOUT_MS 5 @@ -20,10 +23,20 @@ struct intel_guc_slpc { u32 min_freq; u32 rp0_freq; u32 rp1_freq; + u32 boost_freq; /* frequency softlimits */ u32 min_freq_softlimit; u32 max_freq_softlimit; + + /* Protects set/reset of boost freq + * and value of num_waiters + */ + struct mutex lock; + + struct work_struct boost_work; + atomic_t num_waiters; + u32 num_boosts; }; #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 87d8dc8f51b9..1f9d4fde421f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -11,7 +11,9 @@ #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_engine_heartbeat.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" @@ -20,6 +22,7 @@ #include "gt/intel_mocs.h" #include "gt/intel_ring.h" +#include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" @@ -28,21 +31,6 @@ /** * DOC: GuC-based command submission * - * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC - * firmware is moving to an updated submission interface and we plan to - * turn submission back on when that lands. The below documentation (and related - * code) matches the old submission model and will be updated as part of the - * upgrade to the new flow. 
- * - * GuC stage descriptor: - * During initialization, the driver allocates a static pool of 1024 such - * descriptors, and shares them with the GuC. Currently, we only use one - * descriptor. This stage descriptor lets the GuC know about the workqueue and - * process descriptor. Theoretically, it also lets the GuC know about our HW - * contexts (context ID, etc...), but we actually employ a kind of submission - * where the GuC uses the LRCA sent via the work item instead. This is called - * a "proxy" submission. - * * The Scratch registers: * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes * a value to the action register (SOFT_SCRATCH_0) along with any data. It then @@ -51,14 +39,85 @@ * processes the request. The kernel driver polls waiting for this update and * then proceeds. * - * Work Items: - * There are several types of work items that the host may place into a - * workqueue, each with its own requirements and limitations. Currently only - * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which - * represents in-order queue. The kernel driver packs ring tail pointer and an - * ELSP context descriptor dword into Work Item. - * See guc_add_request() + * Command Transport buffers (CTBs): + * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host + * - G2H) are a message interface between the i915 and GuC. + * + * Context registration: + * Before a context can be submitted it must be registered with the GuC via a + * H2G. A unique guc_id is associated with each context. The context is either + * registered at request creation time (normal operation) or at submission time + * (abnormal operation, e.g. after a reset). + * + * Context submission: + * The i915 updates the LRC tail value in memory. The i915 must enable the + * scheduling of the context within the GuC for the GuC to actually consider it. + * Therefore, the first time a disabled context is submitted we use a schedule + * enable H2G, while follow up submissions are done via the context submit H2G, + * which informs the GuC that a previously enabled context has new work + * available. + * + * Context unpin: + * To unpin a context a H2G is used to disable scheduling. When the + * corresponding G2H returns indicating the scheduling disable operation has + * completed it is safe to unpin the context. While a disable is in flight it + * isn't safe to resubmit the context so a fence is used to stall all future + * requests of that context until the G2H is returned. + * + * Context deregistration: + * Before a context can be destroyed or if we steal its guc_id we must + * deregister the context with the GuC via H2G. If stealing the guc_id it isn't + * safe to submit anything to this guc_id until the deregister completes so a + * fence is used to stall all requests associated with this guc_id until the + * corresponding G2H returns indicating the guc_id has been deregistered. + * + * submission_state.guc_ids: + * Unique number associated with private GuC context data passed in during + * context registration / submission / deregistration. 64k available. Simple ida + * is used for allocation. + * + * Stealing guc_ids: + * If no guc_ids are available they can be stolen from another context at + * request creation time if that context is unpinned. If a guc_id can't be found + * we punt this problem to the user as we believe this is near impossible to hit + * during normal use cases. 
* + * Locking: + * In the GuC submission code we have 3 basic spin locks which protect + * everything. Details about each below. + * + * sched_engine->lock + * This is the submission lock for all contexts that share an i915 schedule + * engine (sched_engine), thus only one of the contexts which share a + * sched_engine can be submitting at a time. Currently only one sched_engine is + * used for all of GuC submission but that could change in the future. + * + * guc->submission_state.lock + * Global lock for GuC submission state. Protects guc_ids and destroyed contexts + * list. + * + * ce->guc_state.lock + * Protects everything under ce->guc_state. Ensures that a context is in the + * correct state before issuing a H2G. e.g. We don't issue a schedule disable + * on a disabled context (bad idea), we don't issue a schedule enable when a + * schedule disable is in flight, etc... Also protects list of inflight requests + * on the context and the priority management state. Lock is individual to each + * context. + * + * Lock ordering rules: + * sched_engine->lock -> ce->guc_state.lock + * guc->submission_state.lock -> ce->guc_state.lock + * + * Reset races: + * When a full GT reset is triggered it is assumed that some G2H responses to + * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be + * fatal as we do certain operations upon receiving a G2H (e.g. destroy + * contexts, release guc_ids, etc...). When this occurs we can scrub the + * context state and cleanup appropriately, however this is quite racey. + * To avoid races, the reset code must disable submission before scrubbing for + * the missing G2H, while the submission code must check for submission being + * disabled and skip sending H2Gs and updating context states when it is. Both + * sides must also make sure to hold the relevant locks. */ /* GuC Virtual Engine */ @@ -68,91 +127,56 @@ struct guc_virtual_engine { }; static struct intel_context * -guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count); +guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count, + unsigned long flags); + +static struct intel_context * +guc_create_parallel(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width); #define GUC_REQUEST_SIZE 64 /* bytes */ /* - * Below is a set of functions which control the GuC scheduling state which do - * not require a lock as all state transitions are mutually exclusive. i.e. It - * is not possible for the context pinning code and submission, for the same - * context, to be executing simultaneously. We still need an atomic as it is - * possible for some of the bits to changing at the same time though. + * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous + * per the GuC submission interface. A different allocation algorithm is used + * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to + * partition the guc_id space. We believe the number of multi-lrc contexts in + * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for + * multi-lrc. 
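+ *
+ * For illustration, with the 64k guc_ids noted above this reserves
+ * 65536 / 16 = 4096 contiguous guc_ids for multi-lrc, comfortably above
+ * the stated minimum of 32.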
 */
-#define SCHED_STATE_NO_LOCK_ENABLED		BIT(0)
-#define SCHED_STATE_NO_LOCK_PENDING_ENABLE	BIT(1)
-#define SCHED_STATE_NO_LOCK_REGISTERED		BIT(2)
-static inline bool context_enabled(struct intel_context *ce)
-{
-	return (atomic_read(&ce->guc_sched_state_no_lock) &
-		SCHED_STATE_NO_LOCK_ENABLED);
-}
-
-static inline void set_context_enabled(struct intel_context *ce)
-{
-	atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_enabled(struct intel_context *ce)
-{
-	atomic_and((u32)~SCHED_STATE_NO_LOCK_ENABLED,
-		   &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_pending_enable(struct intel_context *ce)
-{
-	return (atomic_read(&ce->guc_sched_state_no_lock) &
-		SCHED_STATE_NO_LOCK_PENDING_ENABLE);
-}
-
-static inline void set_context_pending_enable(struct intel_context *ce)
-{
-	atomic_or(SCHED_STATE_NO_LOCK_PENDING_ENABLE,
-		  &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_pending_enable(struct intel_context *ce)
-{
-	atomic_and((u32)~SCHED_STATE_NO_LOCK_PENDING_ENABLE,
-		   &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_registered(struct intel_context *ce)
-{
-	return (atomic_read(&ce->guc_sched_state_no_lock) &
-		SCHED_STATE_NO_LOCK_REGISTERED);
-}
-
-static inline void set_context_registered(struct intel_context *ce)
-{
-	atomic_or(SCHED_STATE_NO_LOCK_REGISTERED,
-		  &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_registered(struct intel_context *ce)
-{
-	atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED,
-		   &ce->guc_sched_state_no_lock);
-}
+#define NUMBER_MULTI_LRC_GUC_ID		(GUC_MAX_LRC_DESCRIPTORS / 16)
 
 /*
  * Below is a set of functions which control the GuC scheduling state which
- * require a lock, aside from the special case where the functions are called
- * from guc_lrc_desc_pin(). In that case it isn't possible for any other code
- * path to be executing on the context.
+ * require a lock.
  */
 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER	BIT(0)
 #define SCHED_STATE_DESTROYED				BIT(1)
 #define SCHED_STATE_PENDING_DISABLE			BIT(2)
 #define SCHED_STATE_BANNED				BIT(3)
-#define SCHED_STATE_BLOCKED_SHIFT			4
+#define SCHED_STATE_ENABLED				BIT(4)
+#define SCHED_STATE_PENDING_ENABLE			BIT(5)
+#define SCHED_STATE_REGISTERED				BIT(6)
+#define SCHED_STATE_BLOCKED_SHIFT			7
 #define SCHED_STATE_BLOCKED		BIT(SCHED_STATE_BLOCKED_SHIFT)
 #define SCHED_STATE_BLOCKED_MASK	(0xfff << SCHED_STATE_BLOCKED_SHIFT)
+
 static inline void init_sched_state(struct intel_context *ce)
 {
-	/* Only should be called from guc_lrc_desc_pin() */
-	atomic_set(&ce->guc_sched_state_no_lock, 0);
-	ce->guc_state.sched_state = 0;
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
+}
+
+__maybe_unused
+static bool sched_state_is_init(struct intel_context *ce)
+{
+	/*
+	 * XXX: Kernel contexts can have SCHED_STATE_REGISTERED after
+	 * suspend.
+	 */
+	return !(ce->guc_state.sched_state &
+		 ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
+}
 
 static inline bool
@@ -165,7 +189,7 @@ context_wait_for_deregister_to_register(struct intel_context *ce)
 static inline void
 set_context_wait_for_deregister_to_register(struct intel_context *ce)
 {
-	/* Only should be called from guc_lrc_desc_pin() without lock */
+	lockdep_assert_held(&ce->guc_state.lock);
 	ce->guc_state.sched_state |=
 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
 }
@@ -225,6 +249,57 @@ static inline void clr_context_banned(struct intel_context *ce)
 	ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
 }
 
+static inline bool context_enabled(struct intel_context *ce)
+{
+	return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
+}
+
+static inline void set_context_enabled(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
+}
+
+static inline void clr_context_enabled(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
+}
+
+static inline bool context_pending_enable(struct intel_context *ce)
+{
+	return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void set_context_pending_enable(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void clr_context_pending_enable(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline bool context_registered(struct intel_context *ce)
+{
+	return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
+}
+
+static inline void set_context_registered(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
+}
+
+static inline void clr_context_registered(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
+}
+
 static inline u32 context_blocked(struct intel_context *ce)
 {
 	return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
@@ -233,7 +308,6 @@ static inline u32 context_blocked(struct intel_context *ce)
 
 static inline void incr_context_blocked(struct intel_context *ce)
 {
-	lockdep_assert_held(&ce->engine->sched_engine->lock);
 	lockdep_assert_held(&ce->guc_state.lock);
 
 	ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
@@ -243,7 +317,6 @@ static inline void incr_context_blocked(struct intel_context *ce)
 
 static inline void decr_context_blocked(struct intel_context *ce)
 {
-	lockdep_assert_held(&ce->engine->sched_engine->lock);
 	lockdep_assert_held(&ce->guc_state.lock);
 
 	GEM_BUG_ON(!context_blocked(ce));	/* Underflow check */
@@ -251,14 +324,39 @@ static inline void decr_context_blocked(struct intel_context *ce)
 	ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
 }
 
+static inline bool context_has_committed_requests(struct intel_context *ce)
+{
+	return !!ce->guc_state.number_committed_requests;
+}
+
+static inline void incr_context_committed_requests(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	++ce->guc_state.number_committed_requests;
+	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static inline void decr_context_committed_requests(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->guc_state.lock);
+	--ce->guc_state.number_committed_requests;
+	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static struct intel_context *
+request_to_scheduling_context(struct i915_request *rq)
+{
+	return intel_context_to_parent(rq->context);
+}
+
 static inline bool context_guc_id_invalid(struct intel_context *ce)
 {
-	return ce->guc_id == GUC_INVALID_LRC_ID;
+	return ce->guc_id.id == GUC_INVALID_LRC_ID;
 }
 
 static inline void set_context_guc_id_invalid(struct intel_context *ce)
 {
-	ce->guc_id = GUC_INVALID_LRC_ID;
+	ce->guc_id.id = GUC_INVALID_LRC_ID;
 }
 
 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
@@ -271,6 +369,104 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 	return rb_entry(rb, struct i915_priolist, node);
 }
 
+/*
+ * When using multi-lrc submission a scratch memory area is reserved in the
+ * parent's context state for the process descriptor, work queue, and handshake
+ * between the parent + children contexts to insert safe preemption points
+ * between each of the BBs. Currently the scratch area is sized to a page.
+ *
+ * The layout of this scratch area is below:
+ * 0						guc_process_desc
+ * + sizeof(struct guc_process_desc)		child go
+ * + CACHELINE_BYTES				child join[0]
+ * ...
+ * + CACHELINE_BYTES				child join[n - 1]
+ * ... unused
+ * PARENT_SCRATCH_SIZE / 2			work queue start
+ * ... work queue
+ * PARENT_SCRATCH_SIZE - 1			work queue end
+ */
+#define WQ_SIZE			(PARENT_SCRATCH_SIZE / 2)
+#define WQ_OFFSET		(PARENT_SCRATCH_SIZE - WQ_SIZE)
+
+struct sync_semaphore {
+	u32 semaphore;
+	u8 unused[CACHELINE_BYTES - sizeof(u32)];
+};
+
+struct parent_scratch {
+	struct guc_process_desc pdesc;
+
+	struct sync_semaphore go;
+	struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
+
+	u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
+		sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
+
+	u32 wq[WQ_SIZE / sizeof(u32)];
+};
+
+static u32 __get_parent_scratch_offset(struct intel_context *ce)
+{
+	GEM_BUG_ON(!ce->parallel.guc.parent_page);
+
+	return ce->parallel.guc.parent_page * PAGE_SIZE;
+}
+
+static u32 __get_wq_offset(struct intel_context *ce)
+{
+	BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
+
+	return __get_parent_scratch_offset(ce) + WQ_OFFSET;
+}
+
+static struct parent_scratch *
+__get_parent_scratch(struct intel_context *ce)
+{
+	BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
+	BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
+
+	/*
+	 * Need to subtract LRC_STATE_OFFSET here as the
+	 * parallel.guc.parent_page is the offset into ce->state while
+	 * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
+	 */
+	return (struct parent_scratch *)
+		(ce->lrc_reg_state +
+		 ((__get_parent_scratch_offset(ce) -
+		   LRC_STATE_OFFSET) / sizeof(u32)));
+}
+
+static struct guc_process_desc *
+__get_process_desc(struct intel_context *ce)
+{
+	struct parent_scratch *ps = __get_parent_scratch(ce);
+
+	return &ps->pdesc;
+}
+
+static u32 *get_wq_pointer(struct guc_process_desc *desc,
+			   struct intel_context *ce,
+			   u32 wqi_size)
+{
+	/*
+	 * Check for space in work queue. Caching a value of head pointer in
+	 * intel_context structure in order to reduce the number of accesses
+	 * to shared GPU memory which may be across a PCIe bus.
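+	 *
+	 * CIRC_SPACE() (linux/circ_buf.h) computes the free space of a
+	 * power-of-2 sized ring from its producer/consumer indices, always
+	 * keeping one entry unused. Here wqi_tail is the producer index and
+	 * the cached wqi_head the consumer index, so a stale head can only
+	 * under-report the available space, never over-report it.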
+ */ +#define AVAILABLE_SPACE \ + CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) + if (wqi_size > AVAILABLE_SPACE) { + ce->parallel.guc.wqi_head = READ_ONCE(desc->head); + + if (wqi_size > AVAILABLE_SPACE) + return NULL; + } +#undef AVAILABLE_SPACE + + return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)]; +} + static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index) { struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr; @@ -352,20 +548,29 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, xa_unlock_irqrestore(&guc->context_lookup, flags); } +static void decr_outstanding_submission_g2h(struct intel_guc *guc) +{ + if (atomic_dec_and_test(&guc->outstanding_submission_g2h)) + wake_up_all(&guc->ct.wq); +} + static int guc_submission_send_busy_loop(struct intel_guc *guc, const u32 *action, u32 len, u32 g2h_len_dw, bool loop) { - int err; - - err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); + /* + * We always loop when a send requires a reply (i.e. g2h_len_dw > 0), + * so we don't handle the case where we don't get a reply because we + * aborted the send due to the channel being busy. + */ + GEM_BUG_ON(g2h_len_dw && !loop); - if (!err && g2h_len_dw) + if (g2h_len_dw) atomic_inc(&guc->outstanding_submission_g2h); - return err; + return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); } int intel_guc_wait_for_pending_msg(struct intel_guc *guc, @@ -421,15 +626,17 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) static int guc_lrc_desc_pin(struct intel_context *ce, bool loop); -static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) +static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq) { int err = 0; - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); u32 action[3]; int len = 0; u32 g2h_len_dw = 0; bool enabled; + lockdep_assert_held(&rq->engine->sched_engine->lock); + /* * Corner case where requests were sitting in the priority list or a * request resubmitted after the context was banned. @@ -437,41 +644,34 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) if (unlikely(intel_context_is_banned(ce))) { i915_request_put(i915_request_mark_eio(rq)); intel_engine_signal_breadcrumbs(ce->engine); - goto out; + return 0; } - GEM_BUG_ON(!atomic_read(&ce->guc_id_ref)); + GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); GEM_BUG_ON(context_guc_id_invalid(ce)); - /* - * Corner case where the GuC firmware was blown away and reloaded while - * this context was pinned. - */ - if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) { - err = guc_lrc_desc_pin(ce, false); - if (unlikely(err)) - goto out; - } + spin_lock(&ce->guc_state.lock); /* * The request / context will be run on the hardware when scheduling - * gets enabled in the unblock. + * gets enabled in the unblock. For multi-lrc we still submit the + * context to move the LRC tails. 
*/ - if (unlikely(context_blocked(ce))) + if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce))) goto out; - enabled = context_enabled(ce); + enabled = context_enabled(ce) || context_blocked(ce); if (!enabled) { action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET; - action[len++] = ce->guc_id; + action[len++] = ce->guc_id.id; action[len++] = GUC_CONTEXT_ENABLE; set_context_pending_enable(ce); intel_context_get(ce); g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET; } else { action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT; - action[len++] = ce->guc_id; + action[len++] = ce->guc_id.id; } err = intel_guc_send_nb(guc, action, len, g2h_len_dw); @@ -479,6 +679,18 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) trace_intel_context_sched_enable(ce); atomic_inc(&guc->outstanding_submission_g2h); set_context_enabled(ce); + + /* + * Without multi-lrc KMD does the submission step (moving the + * lrc tail) so enabling scheduling is sufficient to submit the + * context. This isn't the case in multi-lrc submission as the + * GuC needs to move the tails, hence the need for another H2G + * to submit a multi-lrc context after enabling scheduling. + */ + if (intel_context_is_parent(ce)) { + action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT; + err = intel_guc_send_nb(guc, action, len - 1, 0); + } } else if (!enabled) { clr_context_pending_enable(ce); intel_context_put(ce); @@ -487,9 +699,22 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) trace_i915_request_guc_submit(rq); out: + spin_unlock(&ce->guc_state.lock); return err; } +static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) +{ + int ret = __guc_add_request(guc, rq); + + if (unlikely(ret == -EBUSY)) { + guc->stalled_request = rq; + guc->submission_stall_reason = STALL_ADD_REQUEST; + } + + return ret; +} + static inline void guc_set_lrc_tail(struct i915_request *rq) { rq->context->lrc_reg_state[CTX_RING_TAIL] = @@ -501,6 +726,135 @@ static inline int rq_prio(const struct i915_request *rq) return rq->sched.attr.priority; } +static bool is_multi_lrc_rq(struct i915_request *rq) +{ + return intel_context_is_parallel(rq->context); +} + +static bool can_merge_rq(struct i915_request *rq, + struct i915_request *last) +{ + return request_to_scheduling_context(rq) == + request_to_scheduling_context(last); +} + +static u32 wq_space_until_wrap(struct intel_context *ce) +{ + return (WQ_SIZE - ce->parallel.guc.wqi_tail); +} + +static void write_wqi(struct guc_process_desc *desc, + struct intel_context *ce, + u32 wqi_size) +{ + BUILD_BUG_ON(!is_power_of_2(WQ_SIZE)); + + /* + * Ensure WQI are visible before updating tail + */ + intel_guc_write_barrier(ce_to_guc(ce)); + + ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) & + (WQ_SIZE - 1); + WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail); +} + +static int guc_wq_noop_append(struct intel_context *ce) +{ + struct guc_process_desc *desc = __get_process_desc(ce); + u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce)); + u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1; + + if (!wqi) + return -EBUSY; + + GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); + + *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) | + FIELD_PREP(WQ_LEN_MASK, len_dw); + ce->parallel.guc.wqi_tail = 0; + + return 0; +} + +static int __guc_wq_item_append(struct i915_request *rq) +{ + struct intel_context *ce = request_to_scheduling_context(rq); + struct intel_context *child; + struct guc_process_desc *desc = __get_process_desc(ce); + unsigned int 
wqi_size = (ce->parallel.number_children + 4) * + sizeof(u32); + u32 *wqi; + u32 len_dw = (wqi_size / sizeof(u32)) - 1; + int ret; + + /* Ensure context is in correct state updating work queue */ + GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); + GEM_BUG_ON(context_guc_id_invalid(ce)); + GEM_BUG_ON(context_wait_for_deregister_to_register(ce)); + GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)); + + /* Insert NOOP if this work queue item will wrap the tail pointer. */ + if (wqi_size > wq_space_until_wrap(ce)) { + ret = guc_wq_noop_append(ce); + if (ret) + return ret; + } + + wqi = get_wq_pointer(desc, ce, wqi_size); + if (!wqi) + return -EBUSY; + + GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw)); + + *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) | + FIELD_PREP(WQ_LEN_MASK, len_dw); + *wqi++ = ce->lrc.lrca; + *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) | + FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64)); + *wqi++ = 0; /* fence_id */ + for_each_child(ce, child) + *wqi++ = child->ring->tail / sizeof(u64); + + write_wqi(desc, ce, wqi_size); + + return 0; +} + +static int guc_wq_item_append(struct intel_guc *guc, + struct i915_request *rq) +{ + struct intel_context *ce = request_to_scheduling_context(rq); + int ret = 0; + + if (likely(!intel_context_is_banned(ce))) { + ret = __guc_wq_item_append(rq); + + if (unlikely(ret == -EBUSY)) { + guc->stalled_request = rq; + guc->submission_stall_reason = STALL_MOVE_LRC_TAIL; + } + } + + return ret; +} + +static bool multi_lrc_submit(struct i915_request *rq) +{ + struct intel_context *ce = request_to_scheduling_context(rq); + + intel_ring_set_tail(rq->ring, rq->tail); + + /* + * We expect the front end (execbuf IOCTL) to set this flag on the last + * request generated from a multi-BB submission. This indicates to the + * backend (GuC interface) that we should submit this context thus + * submitting all the requests generated in parallel. + */ + return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) || + intel_context_is_banned(ce); +} + static int guc_dequeue_one_context(struct intel_guc *guc) { struct i915_sched_engine * const sched_engine = guc->sched_engine; @@ -514,7 +868,17 @@ static int guc_dequeue_one_context(struct intel_guc *guc) if (guc->stalled_request) { submit = true; last = guc->stalled_request; - goto resubmit; + + switch (guc->submission_stall_reason) { + case STALL_REGISTER_CONTEXT: + goto register_context; + case STALL_MOVE_LRC_TAIL: + goto move_lrc_tail; + case STALL_ADD_REQUEST: + goto add_request; + default: + MISSING_CASE(guc->submission_stall_reason); + } } while ((rb = rb_first_cached(&sched_engine->queue))) { @@ -522,8 +886,8 @@ static int guc_dequeue_one_context(struct intel_guc *guc) struct i915_request *rq, *rn; priolist_for_each_request_consume(rq, rn, p) { - if (last && rq->context != last->context) - goto done; + if (last && !can_merge_rq(rq, last)) + goto register_context; list_del_init(&rq->sched.link); @@ -531,33 +895,84 @@ static int guc_dequeue_one_context(struct intel_guc *guc) trace_i915_request_in(rq, 0); last = rq; - submit = true; + + if (is_multi_lrc_rq(rq)) { + /* + * We need to coalesce all multi-lrc requests in + * a relationship into a single H2G. We are + * guaranteed that all of these requests will be + * submitted sequentially. 
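+				 *
+				 * multi_lrc_submit() returns true only for
+				 * the last request of the set (or for a
+				 * banned context), so the coalesced H2G is
+				 * emitted once per parallel submission group.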
+				 */
+				if (multi_lrc_submit(rq)) {
+					submit = true;
+					goto register_context;
+				}
+			} else {
+				submit = true;
+			}
 		}
 
 		rb_erase_cached(&p->node, &sched_engine->queue);
 		i915_priolist_free(p);
 	}
-done:
+
+register_context:
 	if (submit) {
-		guc_set_lrc_tail(last);
-resubmit:
+		struct intel_context *ce = request_to_scheduling_context(last);
+
+		if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+			     !intel_context_is_banned(ce))) {
+			ret = guc_lrc_desc_pin(ce, false);
+			if (unlikely(ret == -EPIPE)) {
+				goto deadlk;
+			} else if (ret == -EBUSY) {
+				guc->stalled_request = last;
+				guc->submission_stall_reason =
+					STALL_REGISTER_CONTEXT;
+				goto schedule_tasklet;
+			} else if (ret != 0) {
+				GEM_WARN_ON(ret);	/* Unexpected */
+				goto deadlk;
+			}
+		}
+
+move_lrc_tail:
+		if (is_multi_lrc_rq(last)) {
+			ret = guc_wq_item_append(guc, last);
+			if (ret == -EBUSY) {
+				goto schedule_tasklet;
+			} else if (ret != 0) {
+				GEM_WARN_ON(ret);	/* Unexpected */
+				goto deadlk;
+			}
+		} else {
+			guc_set_lrc_tail(last);
+		}
+
+add_request:
 		ret = guc_add_request(guc, last);
-		if (unlikely(ret == -EPIPE))
+		if (unlikely(ret == -EPIPE)) {
+			goto deadlk;
+		} else if (ret == -EBUSY) {
+			goto schedule_tasklet;
+		} else if (ret != 0) {
+			GEM_WARN_ON(ret);	/* Unexpected */
 			goto deadlk;
-		else if (ret == -EBUSY) {
-			tasklet_schedule(&sched_engine->tasklet);
-			guc->stalled_request = last;
-			return false;
 		}
 	}
 
 	guc->stalled_request = NULL;
+	guc->submission_stall_reason = STALL_NONE;
 	return submit;
 
 deadlk:
 	sched_engine->tasklet.callback = NULL;
 	tasklet_disable_nosync(&sched_engine->tasklet);
 	return false;
+
+schedule_tasklet:
+	tasklet_schedule(&sched_engine->tasklet);
+	return false;
 }
 
 static void guc_submission_tasklet(struct tasklet_struct *t)
@@ -596,10 +1011,18 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 	unsigned long index, flags;
 	bool pending_disable, pending_enable, deregister, destroyed, banned;
 
+	xa_lock_irqsave(&guc->context_lookup, flags);
 	xa_for_each(&guc->context_lookup, index, ce) {
-		/* Flush context */
-		spin_lock_irqsave(&ce->guc_state.lock, flags);
-		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+		/*
+		 * Corner case where the ref count on the object is zero but the
+		 * deregister G2H was lost. In this case we don't touch the ref
+		 * count and finish the destroy of the context.
+		 */
+		bool do_put = kref_get_unless_zero(&ce->ref);
+
+		xa_unlock(&guc->context_lookup);
+
+		spin_lock(&ce->guc_state.lock);
 
 		/*
 		 * Once we are at this point submission_disabled() is guaranteed
@@ -615,11 +1038,16 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 		banned = context_banned(ce);
 		init_sched_state(ce);
 
+		spin_unlock(&ce->guc_state.lock);
+
+		GEM_BUG_ON(!do_put && !destroyed);
+
 		if (pending_enable || destroyed || deregister) {
-			atomic_dec(&guc->outstanding_submission_g2h);
+			decr_outstanding_submission_g2h(guc);
 			if (deregister)
 				guc_signal_context_fence(ce);
 			if (destroyed) {
+				intel_gt_pm_put_async(guc_to_gt(guc));
 				release_guc_id(guc, ce);
 				__guc_context_destroy(ce);
 			}
@@ -635,14 +1063,285 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 				intel_engine_signal_breadcrumbs(ce->engine);
 			}
 			intel_context_sched_disable_unpin(ce);
-			atomic_dec(&guc->outstanding_submission_g2h);
-			spin_lock_irqsave(&ce->guc_state.lock, flags);
+			decr_outstanding_submission_g2h(guc);
+
+			spin_lock(&ce->guc_state.lock);
 			guc_blocked_fence_complete(ce);
-			spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+			spin_unlock(&ce->guc_state.lock);
 
 			intel_context_put(ce);
 		}
+
+		if (do_put)
+			intel_context_put(ce);
+		xa_lock(&guc->context_lookup);
 	}
+	xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+/*
+ * GuC stores busyness stats for each engine at context in/out boundaries. A
+ * context 'in' logs execution start time, 'out' adds in -> out delta to total.
+ * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with
+ * GuC.
+ *
+ * __i915_pmu_event_read samples engine busyness. When sampling, if context id
+ * is valid (!= ~0) and start is non-zero, the engine is considered to be
+ * active. For an active engine total busyness = total + (now - start), where
+ * 'now' is the time at which the busyness is sampled. For an inactive engine,
+ * total busyness = total.
+ *
+ * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain.
+ *
+ * The start and total values provided by GuC are 32 bits and wrap around in a
+ * few minutes. Since perf pmu provides busyness as 64 bit monotonically
+ * increasing ns values, there is a need for this implementation to account for
+ * overflows and extend the GuC provided values to 64 bits before returning
+ * busyness to the user. In order to do that, a worker runs periodically with a
+ * period of 1/8th the time it takes for the timestamp to wrap (i.e. once in
+ * 27 seconds for a gt clock frequency of 19.2 MHz).
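+ *
+ * As a worked example: at a 19.2 MHz gt clock the 32-bit timestamp wraps
+ * after 2^32 / 19.2e6 ~= 223.7 s, so POLL_TIME_CLKS (U32_MAX >> 3, defined
+ * below) corresponds to roughly 223.7 / 8 ~= 28 s, the ~27 s quoted above.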
+ */ + +#define WRAP_TIME_CLKS U32_MAX +#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3) + +static void +__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) +{ + u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp); + + if (new_start == lower_32_bits(*prev_start)) + return; + + if (new_start < gt_stamp_last && + (new_start - gt_stamp_last) <= POLL_TIME_CLKS) + gt_stamp_hi++; + + if (new_start > gt_stamp_last && + (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi) + gt_stamp_hi--; + + *prev_start = ((u64)gt_stamp_hi << 32) | new_start; +} + +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +{ + struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine); + struct intel_engine_guc_stats *stats = &engine->stats.guc; + struct intel_guc *guc = &engine->gt->uc.guc; + u32 last_switch = rec->last_switch_in_stamp; + u32 ctx_id = rec->current_context_index; + u32 total = rec->total_runtime; + + lockdep_assert_held(&guc->timestamp.lock); + + stats->running = ctx_id != ~0U && last_switch; + if (stats->running) + __extend_last_switch(guc, &stats->start_gt_clk, last_switch); + + /* + * Instead of adjusting the total for overflow, just add the + * difference from previous sample stats->total_gt_clks + */ + if (total && total != ~0U) { + stats->total_gt_clks += (u32)(total - stats->prev_total); + stats->prev_total = total; + } +} + +static void guc_update_pm_timestamp(struct intel_guc *guc, + struct intel_engine_cs *engine, + ktime_t *now) +{ + u32 gt_stamp_now, gt_stamp_hi; + + lockdep_assert_held(&guc->timestamp.lock); + + gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + gt_stamp_now = intel_uncore_read(engine->uncore, + RING_TIMESTAMP(engine->mmio_base)); + *now = ktime_get(); + + if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp)) + gt_stamp_hi++; + + guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now; +} + +/* + * Unlike the execlist mode of submission total and active times are in terms of + * gt clocks. The *now parameter is retained to return the cpu time at which the + * busyness was sampled. + */ +static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) +{ + struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; + struct i915_gpu_error *gpu_error = &engine->i915->gpu_error; + struct intel_gt *gt = engine->gt; + struct intel_guc *guc = >->uc.guc; + u64 total, gt_stamp_saved; + unsigned long flags; + u32 reset_count; + bool in_reset; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + /* + * If a reset happened, we risk reading partially updated engine + * busyness from GuC, so we just use the driver stored copy of busyness. + * Synchronize with gt reset using reset_count and the + * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count + * after I915_RESET_BACKOFF flag, so ensure that the reset_count is + * usable by checking the flag afterwards. + */ + reset_count = i915_reset_count(gpu_error); + in_reset = test_bit(I915_RESET_BACKOFF, >->reset.flags); + + *now = ktime_get(); + + /* + * The active busyness depends on start_gt_clk and gt_stamp. + * gt_stamp is updated by i915 only when gt is awake and the + * start_gt_clk is derived from GuC state. To get a consistent + * view of activity, we query the GuC state only if gt is awake. 
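+	 *
+	 * If the gt is parked, we instead rely on the totals captured at
+	 * park time by intel_guc_busyness_park() rather than touching the
+	 * hardware or GuC shared memory.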
+ */ + if (intel_gt_pm_get_if_awake(gt) && !in_reset) { + stats_saved = *stats; + gt_stamp_saved = guc->timestamp.gt_stamp; + guc_update_engine_gt_clks(engine); + guc_update_pm_timestamp(guc, engine, now); + intel_gt_pm_put_async(gt); + if (i915_reset_count(gpu_error) != reset_count) { + *stats = stats_saved; + guc->timestamp.gt_stamp = gt_stamp_saved; + } + } + + total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks); + if (stats->running) { + u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; + + total += intel_gt_clock_interval_to_ns(gt, clk); + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); + + return ns_to_ktime(total); +} + +static void __reset_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + cancel_delayed_work_sync(&guc->timestamp.work); + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + engine->stats.guc.prev_total = 0; + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void __update_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + } + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void guc_timestamp_ping(struct work_struct *wrk) +{ + struct intel_guc *guc = container_of(wrk, typeof(*guc), + timestamp.work.work); + struct intel_uc *uc = container_of(guc, typeof(*uc), guc); + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + int srcu, ret; + + /* + * Synchronize with gt reset to make sure the worker does not + * corrupt the engine/guc stats. 
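+	 *
+	 * If a reset is in flight, intel_gt_reset_trylock() fails and this
+	 * ping returns early without rescheduling itself; the worker is
+	 * re-armed when busyness is unparked or stats are re-enabled.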
+ */ + ret = intel_gt_reset_trylock(gt, &srcu); + if (ret) + return; + + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + __update_guc_busyness_stats(guc); + + intel_gt_reset_unlock(gt, srcu); + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); +} + +static int guc_action_enable_usage_stats(struct intel_guc *guc) +{ + u32 offset = intel_guc_engine_usage_offset(guc); + u32 action[] = { + INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF, + offset, + 0, + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static void guc_init_engine_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); + + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) { + int ret = guc_action_enable_usage_stats(guc); + + if (ret) + drm_err(>->i915->drm, + "Failed to enable usage stats: %d!\n", ret); + } +} + +void intel_guc_busyness_park(struct intel_gt *gt) +{ + struct intel_guc *guc = >->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + cancel_delayed_work(&guc->timestamp.work); + __update_guc_busyness_stats(guc); +} + +void intel_guc_busyness_unpark(struct intel_gt *gt) +{ + struct intel_guc *guc = >->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); } static inline bool @@ -692,6 +1391,8 @@ static void guc_flush_submissions(struct intel_guc *guc) spin_unlock_irqrestore(&sched_engine->lock, flags); } +static void guc_flush_destroyed_contexts(struct intel_guc *guc); + void intel_guc_submission_reset_prepare(struct intel_guc *guc) { int i; @@ -704,12 +1405,14 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) intel_gt_park_heartbeats(guc_to_gt(guc)); disable_submission(guc); guc->interrupts.disable(guc); + __reset_guc_busyness_stats(guc); /* Flush IRQ handler */ spin_lock_irq(&guc_to_gt(guc)->irq_lock); spin_unlock_irq(&guc_to_gt(guc)->irq_lock); guc_flush_submissions(guc); + guc_flush_destroyed_contexts(guc); /* * Handle any outstanding G2Hs before reset. Call IRQ handler directly @@ -725,6 +1428,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) wait_for_reset(guc, &guc->outstanding_submission_g2h); } while (!list_empty(&guc->ct.requests.incoming)); } + scrub_guc_desc_for_outstanding_g2h(guc); } @@ -796,16 +1500,14 @@ __unwind_incomplete_requests(struct intel_context *ce) unsigned long flags; spin_lock_irqsave(&sched_engine->lock, flags); - spin_lock(&ce->guc_active.lock); - list_for_each_entry_safe(rq, rn, - &ce->guc_active.requests, - sched.link) { + spin_lock(&ce->guc_state.lock); + list_for_each_entry_safe_reverse(rq, rn, + &ce->guc_state.requests, + sched.link) { if (i915_request_completed(rq)) continue; list_del_init(&rq->sched.link); - spin_unlock(&ce->guc_active.lock); - __i915_request_unsubmit(rq); /* Push the request back into the queue for later resubmission. 
 */
@@ -816,64 +1518,111 @@ __unwind_incomplete_requests(struct intel_context *ce)
 		}
 
 		GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
 
-		list_add_tail(&rq->sched.link, pl);
+		list_add(&rq->sched.link, pl);
 		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
-		spin_lock(&ce->guc_active.lock);
 	}
-	spin_unlock(&ce->guc_active.lock);
+	spin_unlock(&ce->guc_state.lock);
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
 static void __guc_reset_context(struct intel_context *ce, bool stalled)
 {
+	bool local_stalled;
 	struct i915_request *rq;
+	unsigned long flags;
 	u32 head;
+	int i, number_children = ce->parallel.number_children;
+	bool skip = false;
+	struct intel_context *parent = ce;
+
+	GEM_BUG_ON(intel_context_is_child(ce));
 
 	intel_context_get(ce);
 
 	/*
-	 * GuC will implicitly mark the context as non-schedulable
-	 * when it sends the reset notification. Make sure our state
-	 * reflects this change. The context will be marked enabled
-	 * on resubmission.
+	 * GuC will implicitly mark the context as non-schedulable when it sends
+	 * the reset notification. Make sure our state reflects this change. The
+	 * context will be marked enabled on resubmission.
+	 *
+	 * XXX: If the context is reset as a result of the request cancellation
+	 * this G2H is received after the schedule disable complete G2H which is
+	 * wrong as this creates a race between the request cancellation code
+	 * re-submitting the context and this G2H handler. This is a bug in the
+	 * GuC but can be worked around in the meantime by converting this to a
+	 * NOP if a pending enable is in flight as this indicates that a request
+	 * cancellation has occurred.
 	 */
-	clr_context_enabled(ce);
+	spin_lock_irqsave(&ce->guc_state.lock, flags);
+	if (likely(!context_pending_enable(ce)))
+		clr_context_enabled(ce);
+	else
+		skip = true;
+	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+	if (unlikely(skip))
+		goto out_put;
 
-	rq = intel_context_find_active_request(ce);
-	if (!rq) {
-		head = ce->ring->tail;
-		stalled = false;
-		goto out_replay;
-	}
+	/*
+	 * For each context in the relationship, find the hanging request,
+	 * resetting each context / request as needed
+	 */
+	for (i = 0; i < number_children + 1; ++i) {
+		if (!intel_context_is_pinned(ce))
+			goto next_context;
+
+		local_stalled = false;
+		rq = intel_context_find_active_request(ce);
+		if (!rq) {
+			head = ce->ring->tail;
+			goto out_replay;
+		}
 
-	if (!i915_request_started(rq))
-		stalled = false;
+		if (i915_request_started(rq))
+			local_stalled = true;
 
-	GEM_BUG_ON(i915_active_is_idle(&ce->active));
-	head = intel_ring_wrap(ce->ring, rq->head);
-	__i915_request_reset(rq, stalled);
+		GEM_BUG_ON(i915_active_is_idle(&ce->active));
+		head = intel_ring_wrap(ce->ring, rq->head);
 
+		__i915_request_reset(rq, local_stalled && stalled);
 out_replay:
-	guc_reset_state(ce, head, stalled);
-	__unwind_incomplete_requests(ce);
-	intel_context_put(ce);
+		guc_reset_state(ce, head, local_stalled && stalled);
+next_context:
+		if (i != number_children)
+			ce = list_next_entry(ce, parallel.child_link);
+	}
+
+	__unwind_incomplete_requests(parent);
+out_put:
+	intel_context_put(parent);
 }
 
 void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
 {
 	struct intel_context *ce;
 	unsigned long index;
+	unsigned long flags;
 
 	if (unlikely(!guc_submission_initialized(guc))) {
 		/* Reset called during driver load? GuC not yet initialised!
*/ return; } - xa_for_each(&guc->context_lookup, index, ce) - if (intel_context_is_pinned(ce)) + xa_lock_irqsave(&guc->context_lookup, flags); + xa_for_each(&guc->context_lookup, index, ce) { + if (!kref_get_unless_zero(&ce->ref)) + continue; + + xa_unlock(&guc->context_lookup); + + if (intel_context_is_pinned(ce) && + !intel_context_is_child(ce)) __guc_reset_context(ce, stalled); + intel_context_put(ce); + + xa_lock(&guc->context_lookup); + } + xa_unlock_irqrestore(&guc->context_lookup, flags); + /* GuC is blown away, drop all references to contexts */ xa_destroy(&guc->context_lookup); } @@ -886,10 +1635,10 @@ static void guc_cancel_context_requests(struct intel_context *ce) /* Mark all executing requests as skipped. */ spin_lock_irqsave(&sched_engine->lock, flags); - spin_lock(&ce->guc_active.lock); - list_for_each_entry(rq, &ce->guc_active.requests, sched.link) + spin_lock(&ce->guc_state.lock); + list_for_each_entry(rq, &ce->guc_state.requests, sched.link) i915_request_put(i915_request_mark_eio(rq)); - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); spin_unlock_irqrestore(&sched_engine->lock, flags); } @@ -948,11 +1697,25 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc) { struct intel_context *ce; unsigned long index; + unsigned long flags; - xa_for_each(&guc->context_lookup, index, ce) - if (intel_context_is_pinned(ce)) + xa_lock_irqsave(&guc->context_lookup, flags); + xa_for_each(&guc->context_lookup, index, ce) { + if (!kref_get_unless_zero(&ce->ref)) + continue; + + xa_unlock(&guc->context_lookup); + + if (intel_context_is_pinned(ce) && + !intel_context_is_child(ce)) guc_cancel_context_requests(ce); + intel_context_put(ce); + + xa_lock(&guc->context_lookup); + } + xa_unlock_irqrestore(&guc->context_lookup, flags); + guc_cancel_sched_engine_requests(guc->sched_engine); /* GuC is blown away, drop all references to contexts */ @@ -981,12 +1744,15 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc) intel_gt_unpark_heartbeats(guc_to_gt(guc)); } +static void destroyed_worker_func(struct work_struct *w); + /* * Set up the memory resources to be shared with the GuC (via the GGTT) * at firmware loading time. 
*/ int intel_guc_submission_init(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); int ret; if (guc->lrc_desc_pool) @@ -1003,9 +1769,21 @@ int intel_guc_submission_init(struct intel_guc *guc) xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ); - spin_lock_init(&guc->contexts_lock); - INIT_LIST_HEAD(&guc->guc_id_list); - ida_init(&guc->guc_ids); + spin_lock_init(&guc->submission_state.lock); + INIT_LIST_HEAD(&guc->submission_state.guc_id_list); + ida_init(&guc->submission_state.guc_ids); + INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts); + INIT_WORK(&guc->submission_state.destroyed_worker, + destroyed_worker_func); + + guc->submission_state.guc_ids_bitmap = + bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL); + if (!guc->submission_state.guc_ids_bitmap) + return -ENOMEM; + + spin_lock_init(&guc->timestamp.lock); + INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); + guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; return 0; } @@ -1015,8 +1793,10 @@ void intel_guc_submission_fini(struct intel_guc *guc) if (!guc->lrc_desc_pool) return; + guc_flush_destroyed_contexts(guc); guc_lrc_desc_pool_destroy(guc); i915_sched_engine_put(guc->sched_engine); + bitmap_free(guc->submission_state.guc_ids_bitmap); } static inline void queue_request(struct i915_sched_engine *sched_engine, @@ -1027,21 +1807,28 @@ static inline void queue_request(struct i915_sched_engine *sched_engine, list_add_tail(&rq->sched.link, i915_sched_lookup_priolist(sched_engine, prio)); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + tasklet_hi_schedule(&sched_engine->tasklet); } static int guc_bypass_tasklet_submit(struct intel_guc *guc, struct i915_request *rq) { - int ret; + int ret = 0; __i915_request_submit(rq); trace_i915_request_in(rq, 0); - guc_set_lrc_tail(rq); - ret = guc_add_request(guc, rq); - if (ret == -EBUSY) - guc->stalled_request = rq; + if (is_multi_lrc_rq(rq)) { + if (multi_lrc_submit(rq)) { + ret = guc_wq_item_append(guc, rq); + if (!ret) + ret = guc_add_request(guc, rq); + } + } else { + guc_set_lrc_tail(rq); + ret = guc_add_request(guc, rq); + } if (unlikely(ret == -EPIPE)) disable_submission(guc); @@ -1049,6 +1836,16 @@ static int guc_bypass_tasklet_submit(struct intel_guc *guc, return ret; } +static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq) +{ + struct i915_sched_engine *sched_engine = rq->engine->sched_engine; + struct intel_context *ce = request_to_scheduling_context(rq); + + return submission_disabled(guc) || guc->stalled_request || + !i915_sched_engine_is_empty(sched_engine) || + !lrc_desc_registered(guc, ce->guc_id.id); +} + static void guc_submit_request(struct i915_request *rq) { struct i915_sched_engine *sched_engine = rq->engine->sched_engine; @@ -1058,8 +1855,7 @@ static void guc_submit_request(struct i915_request *rq) /* Will be called from irq-context when using foreign fences. 
*/ spin_lock_irqsave(&sched_engine->lock, flags); - if (submission_disabled(guc) || guc->stalled_request || - !i915_sched_engine_is_empty(sched_engine)) + if (need_tasklet(guc, rq)) queue_request(sched_engine, rq, rq_prio(rq)); else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY) tasklet_hi_schedule(&sched_engine->tasklet); @@ -1067,72 +1863,117 @@ static void guc_submit_request(struct i915_request *rq) spin_unlock_irqrestore(&sched_engine->lock, flags); } -static int new_guc_id(struct intel_guc *guc) +static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) { - return ida_simple_get(&guc->guc_ids, 0, - GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL | - __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + int ret; + + GEM_BUG_ON(intel_context_is_child(ce)); + + if (intel_context_is_parent(ce)) + ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap, + NUMBER_MULTI_LRC_GUC_ID, + order_base_2(ce->parallel.number_children + + 1)); + else + ret = ida_simple_get(&guc->submission_state.guc_ids, + NUMBER_MULTI_LRC_GUC_ID, + GUC_MAX_LRC_DESCRIPTORS, + GFP_KERNEL | __GFP_RETRY_MAYFAIL | + __GFP_NOWARN); + if (unlikely(ret < 0)) + return ret; + + ce->guc_id.id = ret; + return 0; } static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) { + GEM_BUG_ON(intel_context_is_child(ce)); + if (!context_guc_id_invalid(ce)) { - ida_simple_remove(&guc->guc_ids, ce->guc_id); - reset_lrc_desc(guc, ce->guc_id); + if (intel_context_is_parent(ce)) + bitmap_release_region(guc->submission_state.guc_ids_bitmap, + ce->guc_id.id, + order_base_2(ce->parallel.number_children + + 1)); + else + ida_simple_remove(&guc->submission_state.guc_ids, + ce->guc_id.id); + reset_lrc_desc(guc, ce->guc_id.id); set_context_guc_id_invalid(ce); } - if (!list_empty(&ce->guc_id_link)) - list_del_init(&ce->guc_id_link); + if (!list_empty(&ce->guc_id.link)) + list_del_init(&ce->guc_id.link); } static void release_guc_id(struct intel_guc *guc, struct intel_context *ce) { unsigned long flags; - spin_lock_irqsave(&guc->contexts_lock, flags); + spin_lock_irqsave(&guc->submission_state.lock, flags); __release_guc_id(guc, ce); - spin_unlock_irqrestore(&guc->contexts_lock, flags); + spin_unlock_irqrestore(&guc->submission_state.lock, flags); } -static int steal_guc_id(struct intel_guc *guc) +static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce) { - struct intel_context *ce; - int guc_id; + struct intel_context *cn; - lockdep_assert_held(&guc->contexts_lock); + lockdep_assert_held(&guc->submission_state.lock); + GEM_BUG_ON(intel_context_is_child(ce)); + GEM_BUG_ON(intel_context_is_parent(ce)); - if (!list_empty(&guc->guc_id_list)) { - ce = list_first_entry(&guc->guc_id_list, + if (!list_empty(&guc->submission_state.guc_id_list)) { + cn = list_first_entry(&guc->submission_state.guc_id_list, struct intel_context, - guc_id_link); + guc_id.link); - GEM_BUG_ON(atomic_read(&ce->guc_id_ref)); - GEM_BUG_ON(context_guc_id_invalid(ce)); + GEM_BUG_ON(atomic_read(&cn->guc_id.ref)); + GEM_BUG_ON(context_guc_id_invalid(cn)); + GEM_BUG_ON(intel_context_is_child(cn)); + GEM_BUG_ON(intel_context_is_parent(cn)); - list_del_init(&ce->guc_id_link); - guc_id = ce->guc_id; - clr_context_registered(ce); - set_context_guc_id_invalid(ce); - return guc_id; + list_del_init(&cn->guc_id.link); + ce->guc_id = cn->guc_id; + + spin_lock(&ce->guc_state.lock); + clr_context_registered(cn); + spin_unlock(&ce->guc_state.lock); + + set_context_guc_id_invalid(cn); + + return 0; } else { return -EAGAIN; } } -static int assign_guc_id(struct 
intel_guc *guc, u16 *out) +static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce) { int ret; - lockdep_assert_held(&guc->contexts_lock); + lockdep_assert_held(&guc->submission_state.lock); + GEM_BUG_ON(intel_context_is_child(ce)); - ret = new_guc_id(guc); + ret = new_guc_id(guc, ce); if (unlikely(ret < 0)) { - ret = steal_guc_id(guc); + if (intel_context_is_parent(ce)) + return -ENOSPC; + + ret = steal_guc_id(guc, ce); if (ret < 0) return ret; } - *out = ret; + if (intel_context_is_parent(ce)) { + struct intel_context *child; + int i = 1; + + for_each_child(ce, child) + child->guc_id.id = ce->guc_id.id + i++; + } + return 0; } @@ -1142,26 +1983,28 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) int ret = 0; unsigned long flags, tries = PIN_GUC_ID_TRIES; - GEM_BUG_ON(atomic_read(&ce->guc_id_ref)); + GEM_BUG_ON(atomic_read(&ce->guc_id.ref)); try_again: - spin_lock_irqsave(&guc->contexts_lock, flags); + spin_lock_irqsave(&guc->submission_state.lock, flags); + + might_lock(&ce->guc_state.lock); if (context_guc_id_invalid(ce)) { - ret = assign_guc_id(guc, &ce->guc_id); + ret = assign_guc_id(guc, ce); if (ret) goto out_unlock; ret = 1; /* Indicates newly assigned guc_id */ } - if (!list_empty(&ce->guc_id_link)) - list_del_init(&ce->guc_id_link); - atomic_inc(&ce->guc_id_ref); + if (!list_empty(&ce->guc_id.link)) + list_del_init(&ce->guc_id.link); + atomic_inc(&ce->guc_id.ref); out_unlock: - spin_unlock_irqrestore(&guc->contexts_lock, flags); + spin_unlock_irqrestore(&guc->submission_state.lock, flags); /* - * -EAGAIN indicates no guc_ids are available, let's retire any + * -EAGAIN indicates no guc_id is available, let's retire any * outstanding requests to see if that frees up a guc_id. If the first * retire didn't help, insert a sleep with the timeslice duration before * attempting to retire more requests.
Double the sleep period each @@ -1189,16 +2032,43 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) { unsigned long flags; - GEM_BUG_ON(atomic_read(&ce->guc_id_ref) < 0); + GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0); + GEM_BUG_ON(intel_context_is_child(ce)); - if (unlikely(context_guc_id_invalid(ce))) + if (unlikely(context_guc_id_invalid(ce) || + intel_context_is_parent(ce))) return; - spin_lock_irqsave(&guc->contexts_lock, flags); - if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id_link) && - !atomic_read(&ce->guc_id_ref)) - list_add_tail(&ce->guc_id_link, &guc->guc_id_list); - spin_unlock_irqrestore(&guc->contexts_lock, flags); + spin_lock_irqsave(&guc->submission_state.lock, flags); + if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) && + !atomic_read(&ce->guc_id.ref)) + list_add_tail(&ce->guc_id.link, + &guc->submission_state.guc_id_list); + spin_unlock_irqrestore(&guc->submission_state.lock, flags); +} + +static int __guc_action_register_multi_lrc(struct intel_guc *guc, + struct intel_context *ce, + u32 guc_id, + u32 offset, + bool loop) +{ + struct intel_context *child; + u32 action[4 + MAX_ENGINE_INSTANCE]; + int len = 0; + + GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); + + action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; + action[len++] = guc_id; + action[len++] = ce->parallel.number_children + 1; + action[len++] = offset; + for_each_child(ce, child) { + offset += sizeof(struct guc_lrc_desc); + action[len++] = offset; + } + + return guc_submission_send_busy_loop(guc, action, len, 0, loop); } static int __guc_action_register_context(struct intel_guc *guc, @@ -1220,21 +2090,31 @@ static int register_context(struct intel_context *ce, bool loop) { struct intel_guc *guc = ce_to_guc(ce); u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) + - ce->guc_id * sizeof(struct guc_lrc_desc); + ce->guc_id.id * sizeof(struct guc_lrc_desc); int ret; + GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_register(ce); - ret = __guc_action_register_context(guc, ce->guc_id, offset, loop); - if (likely(!ret)) + if (intel_context_is_parent(ce)) + ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id, + offset, loop); + else + ret = __guc_action_register_context(guc, ce->guc_id.id, offset, + loop); + if (likely(!ret)) { + unsigned long flags; + + spin_lock_irqsave(&ce->guc_state.lock, flags); set_context_registered(ce); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + } return ret; } static int __guc_action_deregister_context(struct intel_guc *guc, - u32 guc_id, - bool loop) + u32 guc_id) { u32 action[] = { INTEL_GUC_ACTION_DEREGISTER_CONTEXT, @@ -1243,33 +2123,38 @@ static int __guc_action_deregister_context(struct intel_guc *guc, return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), G2H_LEN_DW_DEREGISTER_CONTEXT, - loop); + true); } -static int deregister_context(struct intel_context *ce, u32 guc_id, bool loop) +static int deregister_context(struct intel_context *ce, u32 guc_id) { struct intel_guc *guc = ce_to_guc(ce); + GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_deregister(ce); - return __guc_action_deregister_context(guc, guc_id, loop); + return __guc_action_deregister_context(guc, guc_id); } -static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask) +static inline void clear_children_join_go_memory(struct intel_context *ce) { - switch (class) { - case RENDER_CLASS: - return mask >> RCS0; - case VIDEO_ENHANCEMENT_CLASS: - return mask 
>> VECS0; - case VIDEO_DECODE_CLASS: - return mask >> VCS0; - case COPY_ENGINE_CLASS: - return mask >> BCS0; - default: - MISSING_CASE(class); - return 0; - } + struct parent_scratch *ps = __get_parent_scratch(ce); + int i; + + ps->go.semaphore = 0; + for (i = 0; i < ce->parallel.number_children + 1; ++i) + ps->join[i].semaphore = 0; +} + +static inline u32 get_children_go_value(struct intel_context *ce) +{ + return __get_parent_scratch(ce)->go.semaphore; +} + +static inline u32 get_children_join_value(struct intel_context *ce, + u8 child_index) +{ + return __get_parent_scratch(ce)->join[child_index].semaphore; } static void guc_context_policy_init(struct intel_engine_cs *engine, @@ -1285,22 +2170,20 @@ static void guc_context_policy_init(struct intel_engine_cs *engine, desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000; } -static inline u8 map_i915_prio_to_guc_prio(int prio); - static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) { struct intel_engine_cs *engine = ce->engine; struct intel_runtime_pm *runtime_pm = engine->uncore->rpm; struct intel_guc *guc = &engine->gt->uc.guc; - u32 desc_idx = ce->guc_id; + u32 desc_idx = ce->guc_id.id; struct guc_lrc_desc *desc; - const struct i915_gem_context *ctx; - int prio = I915_CONTEXT_DEFAULT_PRIORITY; bool context_registered; intel_wakeref_t wakeref; + struct intel_context *child; int ret = 0; GEM_BUG_ON(!engine->mask); + GEM_BUG_ON(!sched_state_is_init(ce)); /* * Ensure LRC + CT vmas are in the same region as write barrier is done @@ -1311,25 +2194,53 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) context_registered = lrc_desc_registered(guc, desc_idx); - rcu_read_lock(); - ctx = rcu_dereference(ce->gem_context); - if (ctx) - prio = ctx->sched.priority; - rcu_read_unlock(); - reset_lrc_desc(guc, desc_idx); set_lrc_desc_registered(guc, desc_idx, ce); desc = __get_lrc_desc(guc, desc_idx); desc->engine_class = engine_class_to_guc_class(engine->class); - desc->engine_submit_mask = adjust_engine_mask(engine->class, - engine->mask); + desc->engine_submit_mask = engine->logical_mask; desc->hw_context_desc = ce->lrc.lrca; - ce->guc_prio = map_i915_prio_to_guc_prio(prio); - desc->priority = ce->guc_prio; + desc->priority = ce->guc_state.prio; desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; guc_context_policy_init(engine, desc); - init_sched_state(ce); + + /* + * If context is a parent, we need to register a process descriptor + * describing a work queue and register all child contexts.
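The go/join accessors above read semaphore dwords out of a per-parent scratch area. A host-side mock of that layout, with made-up field names and sizes (the real struct lives in the driver headers), showing the clear_children_join_go_memory() sweep:

#include <stdio.h>

#define MAX_ENGINE_INSTANCE 7	/* assumed bound on children per parent */

/* Hypothetical mirror of the parent scratch block: one "go" semaphore
 * the parent writes, one "join" semaphore per child. Field names echo
 * the accessors above; sizes are illustrative. */
struct sem_dw {
	unsigned int semaphore;
};

struct parent_scratch_mock {
	struct sem_dw go;
	struct sem_dw join[MAX_ENGINE_INSTANCE + 1];
};

int main(void)
{
	struct parent_scratch_mock ps;
	unsigned int number_children = 3;
	unsigned int i;

	/* mirrors clear_children_join_go_memory() */
	ps.go.semaphore = 0;
	for (i = 0; i < number_children + 1; i++)
		ps.join[i].semaphore = 0;

	printf("mock scratch block: %zu bytes\n", sizeof(ps));
	printf("go=%u join[0]=%u\n", ps.go.semaphore, ps.join[0].semaphore);
	return 0;
}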
+ */ + if (intel_context_is_parent(ce)) { + struct guc_process_desc *pdesc; + + ce->parallel.guc.wqi_tail = 0; + ce->parallel.guc.wqi_head = 0; + + desc->process_desc = i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce); + desc->wq_addr = i915_ggtt_offset(ce->state) + + __get_wq_offset(ce); + desc->wq_size = WQ_SIZE; + + pdesc = __get_process_desc(ce); + memset(pdesc, 0, sizeof(*(pdesc))); + pdesc->stage_id = ce->guc_id.id; + pdesc->wq_base_addr = desc->wq_addr; + pdesc->wq_size_bytes = desc->wq_size; + pdesc->wq_status = WQ_STATUS_ACTIVE; + + for_each_child(ce, child) { + desc = __get_lrc_desc(guc, child->guc_id.id); + + desc->engine_class = + engine_class_to_guc_class(engine->class); + desc->hw_context_desc = child->lrc.lrca; + desc->priority = ce->guc_state.prio; + desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; + guc_context_policy_init(engine, desc); + } + + clear_children_join_go_memory(ce); + } /* * The context_lookup xarray is used to determine if the hardware @@ -1340,26 +2251,23 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) * registering this context. */ if (context_registered) { + bool disabled; + unsigned long flags; + trace_intel_context_steal_guc_id(ce); - if (!loop) { + GEM_BUG_ON(!loop); + + /* Seal race with Reset */ + spin_lock_irqsave(&ce->guc_state.lock, flags); + disabled = submission_disabled(guc); + if (likely(!disabled)) { set_context_wait_for_deregister_to_register(ce); intel_context_get(ce); - } else { - bool disabled; - unsigned long flags; - - /* Seal race with Reset */ - spin_lock_irqsave(&ce->guc_state.lock, flags); - disabled = submission_disabled(guc); - if (likely(!disabled)) { - set_context_wait_for_deregister_to_register(ce); - intel_context_get(ce); - } - spin_unlock_irqrestore(&ce->guc_state.lock, flags); - if (unlikely(disabled)) { - reset_lrc_desc(guc, desc_idx); - return 0; /* Will get registered later */ - } + } + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(disabled)) { + reset_lrc_desc(guc, desc_idx); + return 0; /* Will get registered later */ } /* @@ -1367,20 +2275,18 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) * context whose guc_id was stolen. 
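The process descriptor set up above (wqi_head/wqi_tail, wq_base_addr, wq_size) describes a simple ring the driver fills with work queue items and the GuC drains. A toy model of the index arithmetic, assuming a power-of-two ring and the usual keep-one-slot-empty convention (the real WQI format carries more state):

#include <stdio.h>

#define WQ_SIZE 0x1000	/* assumed 4K work queue; the real size comes
			 * from the driver */

/* Minimal model of the shared ring indices: the driver bumps tail when
 * appending work queue items (WQIs), the GuC bumps head as it consumes
 * them; one slot stays empty so full != empty. */
static unsigned int wq_space(unsigned int head, unsigned int tail)
{
	return (head - tail - 1) & (WQ_SIZE - 1);
}

int main(void)
{
	unsigned int head = 0, tail = 0;

	printf("empty ring: %u bytes free\n", wq_space(head, tail));

	tail = (tail + 64) & (WQ_SIZE - 1);	/* append a 64-byte WQI */
	printf("after one WQI: %u bytes free\n", wq_space(head, tail));

	head = tail;				/* GuC consumed everything */
	printf("after consume: %u bytes free\n", wq_space(head, tail));
	return 0;
}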
*/ with_intel_runtime_pm(runtime_pm, wakeref) - ret = deregister_context(ce, ce->guc_id, loop); - if (unlikely(ret == -EBUSY)) { - clr_context_wait_for_deregister_to_register(ce); - intel_context_put(ce); - } else if (unlikely(ret == -ENODEV)) { + ret = deregister_context(ce, ce->guc_id.id); + if (unlikely(ret == -ENODEV)) ret = 0; /* Will get registered later */ - } } else { with_intel_runtime_pm(runtime_pm, wakeref) ret = register_context(ce, loop); - if (unlikely(ret == -EBUSY)) + if (unlikely(ret == -EBUSY)) { + reset_lrc_desc(guc, desc_idx); + } else if (unlikely(ret == -ENODEV)) { reset_lrc_desc(guc, desc_idx); - else if (unlikely(ret == -ENODEV)) ret = 0; /* Will get registered later */ + } } return ret; @@ -1419,7 +2325,12 @@ static int guc_context_pre_pin(struct intel_context *ce, static int guc_context_pin(struct intel_context *ce, void *vaddr) { - return __guc_context_pin(ce, ce->engine, vaddr); + int ret = __guc_context_pin(ce, ce->engine, vaddr); + + if (likely(!ret && !intel_context_is_barrier(ce))) + intel_engine_pm_get(ce->engine); + + return ret; } static void guc_context_unpin(struct intel_context *ce) @@ -1428,6 +2339,9 @@ static void guc_context_unpin(struct intel_context *ce) unpin_guc_id(guc, ce); lrc_unpin(ce); + + if (likely(!intel_context_is_barrier(ce))) + intel_engine_pm_put_async(ce->engine); } static void guc_context_post_unpin(struct intel_context *ce) @@ -1440,7 +2354,7 @@ static void __guc_context_sched_enable(struct intel_guc *guc, { u32 action[] = { INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET, - ce->guc_id, + ce->guc_id.id, GUC_CONTEXT_ENABLE }; @@ -1456,12 +2370,13 @@ static void __guc_context_sched_disable(struct intel_guc *guc, { u32 action[] = { INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET, - guc_id, /* ce->guc_id not stable */ + guc_id, /* ce->guc_id.id not stable */ GUC_CONTEXT_DISABLE }; GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID); + GEM_BUG_ON(intel_context_is_child(ce)); trace_intel_context_sched_disable(ce); guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), @@ -1472,24 +2387,24 @@ static void guc_blocked_fence_complete(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); - if (!i915_sw_fence_done(&ce->guc_blocked)) - i915_sw_fence_complete(&ce->guc_blocked); + if (!i915_sw_fence_done(&ce->guc_state.blocked)) + i915_sw_fence_complete(&ce->guc_state.blocked); } static void guc_blocked_fence_reinit(struct intel_context *ce) { lockdep_assert_held(&ce->guc_state.lock); - GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked)); + GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked)); /* * This fence is always complete unless a pending schedule disable is * outstanding. We arm the fence here and complete it when we receive * the pending schedule disable complete message. 
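guc_blocked_fence_reinit() re-arms a software fence that is otherwise always complete. A toy counter-based analogue of that arm/complete life cycle; i915_sw_fence is far richer, this only sketches the pending-count idea:

#include <stdio.h>

/* Toy analogue of the blocked fence life cycle, assuming i915_sw_fence
 * semantics: done when the pending count is zero. reinit + await arms
 * it; the complete sent when the G2H schedule-disable-done arrives
 * signals it again. */
struct toy_fence {
	int pending;
};

static int fence_done(struct toy_fence *f)      { return f->pending == 0; }
static void fence_reinit(struct toy_fence *f)   { f->pending = 0; }
static void fence_await(struct toy_fence *f)    { f->pending++; }
static void fence_complete(struct toy_fence *f) { f->pending--; }

int main(void)
{
	struct toy_fence blocked = { 0 };

	printf("done=%d\n", fence_done(&blocked)); /* 1: complete by default */

	fence_reinit(&blocked);
	fence_await(&blocked);			/* arm: disable in flight */
	printf("done=%d\n", fence_done(&blocked)); /* 0: context blocked */

	fence_complete(&blocked);		/* G2H disable-done received */
	printf("done=%d\n", fence_done(&blocked)); /* 1: unblocked */
	return 0;
}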
*/ - i915_sw_fence_fini(&ce->guc_blocked); - i915_sw_fence_reinit(&ce->guc_blocked); - i915_sw_fence_await(&ce->guc_blocked); - i915_sw_fence_commit(&ce->guc_blocked); + i915_sw_fence_fini(&ce->guc_state.blocked); + i915_sw_fence_reinit(&ce->guc_state.blocked); + i915_sw_fence_await(&ce->guc_state.blocked); + i915_sw_fence_commit(&ce->guc_state.blocked); } static u16 prep_context_pending_disable(struct intel_context *ce) @@ -1501,35 +2416,30 @@ static u16 prep_context_pending_disable(struct intel_context *ce) guc_blocked_fence_reinit(ce); intel_context_get(ce); - return ce->guc_id; + return ce->guc_id.id; } static struct i915_sw_fence *guc_context_block(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); - struct i915_sched_engine *sched_engine = ce->engine->sched_engine; unsigned long flags; struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; intel_wakeref_t wakeref; u16 guc_id; bool enabled; + GEM_BUG_ON(intel_context_is_child(ce)); + spin_lock_irqsave(&ce->guc_state.lock, flags); - /* - * Sync with submission path, increment before below changes to context - * state. - */ - spin_lock(&sched_engine->lock); incr_context_blocked(ce); - spin_unlock(&sched_engine->lock); enabled = context_enabled(ce); if (unlikely(!enabled || submission_disabled(guc))) { if (enabled) clr_context_enabled(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); - return &ce->guc_blocked; + return &ce->guc_state.blocked; } /* @@ -1545,26 +2455,41 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce) with_intel_runtime_pm(runtime_pm, wakeref) __guc_context_sched_disable(guc, ce, guc_id); - return &ce->guc_blocked; + return &ce->guc_state.blocked; +} + +#define SCHED_STATE_MULTI_BLOCKED_MASK \ + (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED) +#define SCHED_STATE_NO_UNBLOCK \ + (SCHED_STATE_MULTI_BLOCKED_MASK | \ + SCHED_STATE_PENDING_DISABLE | \ + SCHED_STATE_BANNED) + +static bool context_cant_unblock(struct intel_context *ce) +{ + lockdep_assert_held(&ce->guc_state.lock); + + return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) || + context_guc_id_invalid(ce) || + !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) || + !intel_context_is_pinned(ce); } static void guc_context_unblock(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); - struct i915_sched_engine *sched_engine = ce->engine->sched_engine; unsigned long flags; struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; intel_wakeref_t wakeref; bool enable; GEM_BUG_ON(context_enabled(ce)); + GEM_BUG_ON(intel_context_is_child(ce)); spin_lock_irqsave(&ce->guc_state.lock, flags); if (unlikely(submission_disabled(guc) || - !intel_context_is_pinned(ce) || - context_pending_disable(ce) || - context_blocked(ce) > 1)) { + context_cant_unblock(ce))) { enable = false; } else { enable = true; @@ -1573,13 +2498,7 @@ static void guc_context_unblock(struct intel_context *ce) intel_context_get(ce); } - /* - * Sync with submission path, decrement after above changes to context - * state. 
- */ - spin_lock(&sched_engine->lock); decr_context_blocked(ce); - spin_unlock(&sched_engine->lock); spin_unlock_irqrestore(&ce->guc_state.lock, flags); @@ -1592,16 +2511,29 @@ static void guc_context_unblock(struct intel_context *ce) static void guc_context_cancel_request(struct intel_context *ce, struct i915_request *rq) { + struct intel_context *block_context = + request_to_scheduling_context(rq); + if (i915_sw_fence_signaled(&rq->submit)) { - struct i915_sw_fence *fence = guc_context_block(ce); + struct i915_sw_fence *fence; + intel_context_get(ce); + fence = guc_context_block(block_context); i915_sw_fence_wait(fence); if (!i915_request_completed(rq)) { __i915_request_skip(rq); guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head), true); } - guc_context_unblock(ce); + + /* + * XXX: Racey if context is reset, see comment in + * __guc_reset_context(). + */ + flush_work(&ce_to_guc(ce)->ct.requests.worker); + + guc_context_unblock(block_context); + intel_context_put(ce); } } @@ -1626,6 +2558,8 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq) intel_wakeref_t wakeref; unsigned long flags; + GEM_BUG_ON(intel_context_is_child(ce)); + guc_flush_submissions(guc); spin_lock_irqsave(&ce->guc_state.lock, flags); @@ -1662,7 +2596,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq) if (!context_guc_id_invalid(ce)) with_intel_runtime_pm(runtime_pm, wakeref) __guc_context_set_preemption_timeout(guc, - ce->guc_id, + ce->guc_id.id, 1); spin_unlock_irqrestore(&ce->guc_state.lock, flags); } @@ -1675,40 +2609,24 @@ static void guc_context_sched_disable(struct intel_context *ce) struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm; intel_wakeref_t wakeref; u16 guc_id; - bool enabled; - if (submission_disabled(guc) || context_guc_id_invalid(ce) || - !lrc_desc_registered(guc, ce->guc_id)) { - clr_context_enabled(ce); - goto unpin; - } - - if (!context_enabled(ce)) - goto unpin; + GEM_BUG_ON(intel_context_is_child(ce)); spin_lock_irqsave(&ce->guc_state.lock, flags); /* - * We have to check if the context has been disabled by another thread. - * We also have to check if the context has been pinned again as another - * pin operation is allowed to pass this function. Checking the pin - * count, within ce->guc_state.lock, synchronizes this function with - * guc_request_alloc ensuring a request doesn't slip through the - * 'context_pending_disable' fence. Checking within the spin lock (can't - * sleep) ensures another process doesn't pin this context and generate - * a request before we set the 'context_pending_disable' flag here. + * We have to check if the context has been disabled by another thread, + * check if submission has been disabled to seal a race with reset and + * finally check if any more requests have been committed to the + * context ensuring that a request doesn't slip through the + * 'context_pending_disable' fence.
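The SCHED_STATE_MULTI_BLOCKED_MASK definitions earlier encode a block counter inside sched_state: the lowest BLOCKED bit means blocked at least once, and any higher bit in the field means blocked more than once, which is exactly what context_cant_unblock() needs to test. A sketch with an assumed shift and field width (the real values are in the driver):

#include <stdio.h>

/* Assumed encoding: one BLOCKED bit plus higher bits forming a counter
 * inside sched_state, so "blocked more than once" is any bit above the
 * first. Shift and width are illustrative, not the driver's values. */
#define SCHED_STATE_BLOCKED_SHIFT	4
#define SCHED_STATE_BLOCKED		(1U << SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_BLOCKED_MASK	(0xfffU << SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_MULTI_BLOCKED_MASK \
	(SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)

int main(void)
{
	unsigned int sched_state = 0;

	sched_state += SCHED_STATE_BLOCKED;	/* incr_context_blocked */
	sched_state += SCHED_STATE_BLOCKED;	/* blocked a second time */
	printf("multi-blocked: %s\n",
	       (sched_state & SCHED_STATE_MULTI_BLOCKED_MASK) ? "yes" : "no");

	sched_state -= SCHED_STATE_BLOCKED;	/* decr_context_blocked */
	printf("multi-blocked: %s\n",
	       (sched_state & SCHED_STATE_MULTI_BLOCKED_MASK) ? "yes" : "no");
	return 0;
}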
*/ - enabled = context_enabled(ce); - if (unlikely(!enabled || submission_disabled(guc))) { - if (enabled) - clr_context_enabled(ce); + if (unlikely(!context_enabled(ce) || submission_disabled(guc) || + context_has_committed_requests(ce))) { + clr_context_enabled(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); goto unpin; } - if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) { - spin_unlock_irqrestore(&ce->guc_state.lock, flags); - return; - } guc_id = prep_context_pending_disable(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); @@ -1724,21 +2642,41 @@ unpin: static inline void guc_lrc_desc_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); + struct intel_gt *gt = guc_to_gt(guc); + unsigned long flags; + bool disabled; - GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id)); - GEM_BUG_ON(ce != __get_context(guc, ce->guc_id)); + lockdep_assert_held(&guc->submission_state.lock); + GEM_BUG_ON(!intel_gt_pm_is_awake(gt)); + GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id)); + GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); GEM_BUG_ON(context_enabled(ce)); - clr_context_registered(ce); - deregister_context(ce, ce->guc_id, true); + /* Seal race with Reset */ + spin_lock_irqsave(&ce->guc_state.lock, flags); + disabled = submission_disabled(guc); + if (likely(!disabled)) { + __intel_gt_pm_get(gt); + set_context_destroyed(ce); + clr_context_registered(ce); + } + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(disabled)) { + __release_guc_id(guc, ce); + __guc_context_destroy(ce); + return; + } + + deregister_context(ce, ce->guc_id.id); } static void __guc_context_destroy(struct intel_context *ce) { - GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || - ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] || - ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || - ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]); + GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || + ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] || + ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || + ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]); + GEM_BUG_ON(ce->guc_state.number_committed_requests); lrc_fini(ce); intel_context_fini(ce); @@ -1756,76 +2694,86 @@ static void __guc_context_destroy(struct intel_context *ce) } } +static void guc_flush_destroyed_contexts(struct intel_guc *guc) +{ + struct intel_context *ce, *cn; + unsigned long flags; + + GEM_BUG_ON(!submission_disabled(guc) && + guc_submission_initialized(guc)); + + spin_lock_irqsave(&guc->submission_state.lock, flags); + list_for_each_entry_safe(ce, cn, + &guc->submission_state.destroyed_contexts, + destroyed_link) { + list_del_init(&ce->destroyed_link); + __release_guc_id(guc, ce); + __guc_context_destroy(ce); + } + spin_unlock_irqrestore(&guc->submission_state.lock, flags); +} + +static void deregister_destroyed_contexts(struct intel_guc *guc) +{ + struct intel_context *ce, *cn; + unsigned long flags; + + spin_lock_irqsave(&guc->submission_state.lock, flags); + list_for_each_entry_safe(ce, cn, + &guc->submission_state.destroyed_contexts, + destroyed_link) { + list_del_init(&ce->destroyed_link); + guc_lrc_desc_unpin(ce); + } + spin_unlock_irqrestore(&guc->submission_state.lock, flags); +} + +static void destroyed_worker_func(struct work_struct *w) +{ + struct intel_guc *guc = container_of(w, struct intel_guc, + submission_state.destroyed_worker); + struct intel_gt *gt = guc_to_gt(guc); + int tmp; + + with_intel_gt_pm(gt, tmp) + 
deregister_destroyed_contexts(guc); +} + static void guc_context_destroy(struct kref *kref) { struct intel_context *ce = container_of(kref, typeof(*ce), ref); - struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; struct intel_guc *guc = ce_to_guc(ce); - intel_wakeref_t wakeref; unsigned long flags; - bool disabled; + bool destroy; /* * If the guc_id is invalid this context has been stolen and we can free * it immediately. Also can be freed immediately if the context is not * registered with the GuC or the GuC is in the middle of a reset. */ - if (context_guc_id_invalid(ce)) { - __guc_context_destroy(ce); - return; - } else if (submission_disabled(guc) || - !lrc_desc_registered(guc, ce->guc_id)) { - release_guc_id(guc, ce); - __guc_context_destroy(ce); - return; - } - - /* - * We have to acquire the context spinlock and check guc_id again, if it - * is valid it hasn't been stolen and needs to be deregistered. We - * delete this context from the list of unpinned guc_ids available to - * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB - * returns indicating this context has been deregistered the guc_id is - * returned to the pool of available guc_ids. - */ - spin_lock_irqsave(&guc->contexts_lock, flags); - if (context_guc_id_invalid(ce)) { - spin_unlock_irqrestore(&guc->contexts_lock, flags); - __guc_context_destroy(ce); - return; + spin_lock_irqsave(&guc->submission_state.lock, flags); + destroy = submission_disabled(guc) || context_guc_id_invalid(ce) || + !lrc_desc_registered(guc, ce->guc_id.id); + if (likely(!destroy)) { + if (!list_empty(&ce->guc_id.link)) + list_del_init(&ce->guc_id.link); + list_add_tail(&ce->destroyed_link, + &guc->submission_state.destroyed_contexts); + } else { + __release_guc_id(guc, ce); } - - if (!list_empty(&ce->guc_id_link)) - list_del_init(&ce->guc_id_link); - spin_unlock_irqrestore(&guc->contexts_lock, flags); - - /* Seal race with Reset */ - spin_lock_irqsave(&ce->guc_state.lock, flags); - disabled = submission_disabled(guc); - if (likely(!disabled)) - set_context_destroyed(ce); - spin_unlock_irqrestore(&ce->guc_state.lock, flags); - if (unlikely(disabled)) { - release_guc_id(guc, ce); + spin_unlock_irqrestore(&guc->submission_state.lock, flags); + if (unlikely(destroy)) { __guc_context_destroy(ce); return; } /* - * We defer GuC context deregistration until the context is destroyed - * in order to save on CTBs. With this optimization ideally we only need - * 1 CTB to register the context during the first pin and 1 CTB to - * deregister the context when the context is destroyed. Without this - * optimization, a CTB would be needed every pin & unpin. - * - * XXX: Need to acqiure the runtime wakeref as this can be triggered - * from context_free_worker when runtime wakeref is not held. - * guc_lrc_desc_unpin requires the runtime as a GuC register is written - * in H2G CTB to deregister the context. A future patch may defer this - * H2G CTB if the runtime wakeref is zero. + * We use a worker to issue the H2G to deregister the context as we can + * take the GT PM for the first time which isn't allowed from an atomic + * context. 
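The destroyed_worker_func()/queue_work() pairing above is the usual defer-blocking-teardown-out-of-atomic-context pattern: the final reference drop only queues the context, and the worker later does the H2G and free. A userspace analogue with a thread standing in for the workqueue; all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy analogue of the destroyed-contexts worker: the final reference
 * drop runs in a context that must not block, so it only queues the
 * object; a worker (a thread here, a workqueue item in the driver)
 * later does the blocking teardown. */
struct toy_ctx {
	struct toy_ctx *next;
	int id;
};

static struct toy_ctx *destroyed_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_destroy(struct toy_ctx *ce)	/* atomic-safe: list add only */
{
	pthread_mutex_lock(&lock);
	ce->next = destroyed_list;
	destroyed_list = ce;
	pthread_mutex_unlock(&lock);
}

static void *destroyed_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (destroyed_list) {
		struct toy_ctx *ce = destroyed_list;

		destroyed_list = ce->next;
		pthread_mutex_unlock(&lock);
		printf("deregister + free context %d\n", ce->id); /* may block */
		free(ce);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;
	int i;

	for (i = 0; i < 3; i++) {
		struct toy_ctx *ce = malloc(sizeof(*ce));

		ce->id = i;
		queue_destroy(ce);	/* from the "atomic" release path */
	}
	pthread_create(&worker, NULL, destroyed_worker, NULL);
	pthread_join(worker, NULL);
	return 0;
}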
*/ - with_intel_runtime_pm(runtime_pm, wakeref) - guc_lrc_desc_unpin(ce); + queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker); } static int guc_context_alloc(struct intel_context *ce) @@ -1839,20 +2787,23 @@ static void guc_context_set_prio(struct intel_guc *guc, { u32 action[] = { INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY, - ce->guc_id, + ce->guc_id.id, prio, }; GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH || prio > GUC_CLIENT_PRIORITY_NORMAL); + lockdep_assert_held(&ce->guc_state.lock); - if (ce->guc_prio == prio || submission_disabled(guc) || - !context_registered(ce)) + if (ce->guc_state.prio == prio || submission_disabled(guc) || + !context_registered(ce)) { + ce->guc_state.prio = prio; return; + } guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); - ce->guc_prio = prio; + ce->guc_state.prio = prio; trace_intel_context_set_prio(ce); } @@ -1871,25 +2822,25 @@ static inline u8 map_i915_prio_to_guc_prio(int prio) static inline void add_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { - lockdep_assert_held(&ce->guc_active.lock); - GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count)); + lockdep_assert_held(&ce->guc_state.lock); + GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); - ++ce->guc_prio_count[guc_prio]; + ++ce->guc_state.prio_count[guc_prio]; /* Overflow protection */ - GEM_WARN_ON(!ce->guc_prio_count[guc_prio]); + GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); } static inline void sub_context_inflight_prio(struct intel_context *ce, u8 guc_prio) { - lockdep_assert_held(&ce->guc_active.lock); - GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count)); + lockdep_assert_held(&ce->guc_state.lock); + GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); /* Underflow protection */ - GEM_WARN_ON(!ce->guc_prio_count[guc_prio]); + GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); - --ce->guc_prio_count[guc_prio]; + --ce->guc_state.prio_count[guc_prio]; } static inline void update_context_prio(struct intel_context *ce) @@ -1900,10 +2851,10 @@ static inline void update_context_prio(struct intel_context *ce) BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0); BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL); - lockdep_assert_held(&ce->guc_active.lock); + lockdep_assert_held(&ce->guc_state.lock); - for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) { - if (ce->guc_prio_count[i]) { + for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) { + if (ce->guc_state.prio_count[i]) { guc_context_set_prio(guc, ce, i); break; } @@ -1918,13 +2869,14 @@ static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio) static void add_to_context(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq)); + GEM_BUG_ON(intel_context_is_child(ce)); GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI); - spin_lock(&ce->guc_active.lock); - list_move_tail(&rq->sched.link, &ce->guc_active.requests); + spin_lock(&ce->guc_state.lock); + list_move_tail(&rq->sched.link, &ce->guc_state.requests); if (rq->guc_prio == GUC_PRIO_INIT) { rq->guc_prio = new_guc_prio; @@ -1936,12 +2888,12 @@ static void add_to_context(struct i915_request *rq) } update_context_prio(ce); - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); } static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce) { - lockdep_assert_held(&ce->guc_active.lock); + 
lockdep_assert_held(&ce->guc_state.lock); if (rq->guc_prio != GUC_PRIO_INIT && rq->guc_prio != GUC_PRIO_FINI) { @@ -1953,9 +2905,11 @@ static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce) static void remove_from_context(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); + + GEM_BUG_ON(intel_context_is_child(ce)); - spin_lock_irq(&ce->guc_active.lock); + spin_lock_irq(&ce->guc_state.lock); list_del_init(&rq->sched.link); clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); @@ -1965,9 +2919,11 @@ static void remove_from_context(struct i915_request *rq) guc_prio_fini(rq, ce); - spin_unlock_irq(&ce->guc_active.lock); + decr_context_committed_requests(ce); - atomic_dec(&ce->guc_id_ref); + spin_unlock_irq(&ce->guc_state.lock); + + atomic_dec(&ce->guc_id.ref); i915_request_notify_execute_cb_imm(rq); } @@ -1992,19 +2948,35 @@ static const struct intel_context_ops guc_context_ops = { .destroy = guc_context_destroy, .create_virtual = guc_create_virtual, + .create_parallel = guc_create_parallel, }; +static void submit_work_cb(struct irq_work *wrk) +{ + struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work); + + might_lock(&rq->engine->sched_engine->lock); + i915_sw_fence_complete(&rq->submit); +} + static void __guc_signal_context_fence(struct intel_context *ce) { - struct i915_request *rq; + struct i915_request *rq, *rn; lockdep_assert_held(&ce->guc_state.lock); if (!list_empty(&ce->guc_state.fences)) trace_intel_context_fence_release(ce); - list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link) - i915_sw_fence_complete(&rq->submit); + /* + * Use an IRQ to ensure locking order of sched_engine->lock -> + * ce->guc_state.lock is preserved. 
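Completing rq->submit directly in __guc_signal_context_fence() could take sched_engine->lock while ce->guc_state.lock is already held, inverting the established order. Queuing an irq_work runs the completion later with no spinlocks held. A toy model of that queue-now-run-after-unlock shape, with a one-slot queue standing in for irq_work:

#include <stdio.h>

/* Toy model of deferring a completion to avoid lock inversion: calling
 * complete() under lock B would take lock A, the reverse of the
 * established A -> B order, so we only queue it under B and run it once
 * B is dropped. */
typedef void (*work_fn)(int);

static work_fn pending_fn;
static int pending_arg;

static void toy_irq_work_queue(work_fn fn, int arg)
{
	pending_fn = fn;	/* safe under lock B: takes no locks */
	pending_arg = arg;
}

static void complete_submit(int rq_id)	/* would take lock A for real */
{
	printf("completing submit fence for rq %d\n", rq_id);
}

int main(void)
{
	/* lock(B) held: signal the context fences */
	toy_irq_work_queue(complete_submit, 1);
	/* unlock(B) */

	if (pending_fn)
		pending_fn(pending_arg);	/* runs with no locks held */
	return 0;
}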
+ */ + list_for_each_entry_safe(rq, rn, &ce->guc_state.fences, + guc_fence_link) { + list_del(&rq->guc_fence_link); + irq_work_queue(&rq->submit_work); + } INIT_LIST_HEAD(&ce->guc_state.fences); } @@ -2013,6 +2985,8 @@ static void guc_signal_context_fence(struct intel_context *ce) { unsigned long flags; + GEM_BUG_ON(intel_context_is_child(ce)); + spin_lock_irqsave(&ce->guc_state.lock, flags); clr_context_wait_for_deregister_to_register(ce); __guc_signal_context_fence(ce); @@ -2022,13 +2996,28 @@ static void guc_signal_context_fence(struct intel_context *ce) static bool context_needs_register(struct intel_context *ce, bool new_guc_id) { return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) || - !lrc_desc_registered(ce_to_guc(ce), ce->guc_id)) && + !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) && !submission_disabled(ce_to_guc(ce)); } +static void guc_context_init(struct intel_context *ce) +{ + const struct i915_gem_context *ctx; + int prio = I915_CONTEXT_DEFAULT_PRIORITY; + + rcu_read_lock(); + ctx = rcu_dereference(ce->gem_context); + if (ctx) + prio = ctx->sched.priority; + rcu_read_unlock(); + + ce->guc_state.prio = map_i915_prio_to_guc_prio(prio); + set_bit(CONTEXT_GUC_INIT, &ce->flags); +} + static int guc_request_alloc(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); struct intel_guc *guc = ce_to_guc(ce); unsigned long flags; int ret; @@ -2057,14 +3046,17 @@ static int guc_request_alloc(struct i915_request *rq) rq->reserved_space -= GUC_REQUEST_SIZE; + if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags))) + guc_context_init(ce); + /* * Call pin_guc_id here rather than in the pinning step as with * dma_resv, contexts can be repeatedly pinned / unpinned trashing the - * guc_ids and creating horrible race conditions. This is especially bad - * when guc_ids are being stolen due to over subscription. By the time + * guc_id and creating horrible race conditions. This is especially bad + * when guc_id are being stolen due to over subscription. By the time * this function is reached, it is guaranteed that the guc_id will be * persistent until the generated request is retired. Thus, sealing these - * race conditions. It is still safe to fail here if guc_id are + * race conditions. It is still safe to fail here if guc_id are * exhausted and return -EAGAIN to the user indicating that they can try * again in the future. * @@ -2074,7 +3066,7 @@ static int guc_request_alloc(struct i915_request *rq) * decremented on each retire. When it is zero, a lock around the * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id. */ - if (atomic_add_unless(&ce->guc_id_ref, 1, 0)) + if (atomic_add_unless(&ce->guc_id.ref, 1, 0)) goto out; ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */ @@ -2087,7 +3079,7 @@ static int guc_request_alloc(struct i915_request *rq) disable_submission(guc); goto out; /* GPU will be reset */ } - atomic_dec(&ce->guc_id_ref); + atomic_dec(&ce->guc_id.ref); unpin_guc_id(guc, ce); return ret; } @@ -2102,22 +3094,16 @@ out: * schedule enable or context registration if either G2H is pending * respectively. Once a G2H returns, the fence is released that is * blocking these requests (see guc_signal_context_fence). - * - * We can safely check the below fields outside of the lock as it isn't - * possible for these fields to transition from being clear to set but - * converse is possible, hence the need for the check within the lock.
*/ - if (likely(!context_wait_for_deregister_to_register(ce) && - !context_pending_disable(ce))) - return 0; - spin_lock_irqsave(&ce->guc_state.lock, flags); if (context_wait_for_deregister_to_register(ce) || context_pending_disable(ce)) { + init_irq_work(&rq->submit_work, submit_work_cb); i915_sw_fence_await(&rq->submit); list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences); } + incr_context_committed_requests(ce); spin_unlock_irqrestore(&ce->guc_state.lock, flags); return 0; @@ -2135,8 +3121,30 @@ static int guc_virtual_context_pre_pin(struct intel_context *ce, static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr) { struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); + int ret = __guc_context_pin(ce, engine, vaddr); + intel_engine_mask_t tmp, mask = ce->engine->mask; - return __guc_context_pin(ce, engine, vaddr); + if (likely(!ret)) + for_each_engine_masked(engine, ce->engine->gt, mask, tmp) + intel_engine_pm_get(engine); + + return ret; +} + +static void guc_virtual_context_unpin(struct intel_context *ce) +{ + intel_engine_mask_t tmp, mask = ce->engine->mask; + struct intel_engine_cs *engine; + struct intel_guc *guc = ce_to_guc(ce); + + GEM_BUG_ON(context_enabled(ce)); + GEM_BUG_ON(intel_context_is_barrier(ce)); + + unpin_guc_id(guc, ce); + lrc_unpin(ce); + + for_each_engine_masked(engine, ce->engine->gt, mask, tmp) + intel_engine_pm_put_async(engine); } static void guc_virtual_context_enter(struct intel_context *ce) @@ -2173,7 +3181,98 @@ static const struct intel_context_ops virtual_guc_context_ops = { .pre_pin = guc_virtual_context_pre_pin, .pin = guc_virtual_context_pin, - .unpin = guc_context_unpin, + .unpin = guc_virtual_context_unpin, + .post_unpin = guc_context_post_unpin, + + .ban = guc_context_ban, + + .cancel_request = guc_context_cancel_request, + + .enter = guc_virtual_context_enter, + .exit = guc_virtual_context_exit, + + .sched_disable = guc_context_sched_disable, + + .destroy = guc_context_destroy, + + .get_sibling = guc_virtual_get_sibling, +}; + +static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) +{ + struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); + struct intel_guc *guc = ce_to_guc(ce); + int ret; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + ret = pin_guc_id(guc, ce); + if (unlikely(ret < 0)) + return ret; + + return __guc_context_pin(ce, engine, vaddr); +} + +static int guc_child_context_pin(struct intel_context *ce, void *vaddr) +{ + struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); + + GEM_BUG_ON(!intel_context_is_child(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + __intel_context_pin(ce->parallel.parent); + return __guc_context_pin(ce, engine, vaddr); +} + +static void guc_parent_context_unpin(struct intel_context *ce) +{ + struct intel_guc *guc = ce_to_guc(ce); + + GEM_BUG_ON(context_enabled(ce)); + GEM_BUG_ON(intel_context_is_barrier(ce)); + GEM_BUG_ON(!intel_context_is_parent(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + if (ce->parallel.last_rq) + i915_request_put(ce->parallel.last_rq); + unpin_guc_id(guc, ce); + lrc_unpin(ce); +} + +static void guc_child_context_unpin(struct intel_context *ce) +{ + GEM_BUG_ON(context_enabled(ce)); + GEM_BUG_ON(intel_context_is_barrier(ce)); + GEM_BUG_ON(!intel_context_is_child(ce)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + lrc_unpin(ce); +} + +static void guc_child_context_post_unpin(struct intel_context *ce) 
+{ + GEM_BUG_ON(!intel_context_is_child(ce)); + GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent)); + GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); + + lrc_post_unpin(ce); + intel_context_unpin(ce->parallel.parent); +} + +static void guc_child_context_destroy(struct kref *kref) +{ + struct intel_context *ce = container_of(kref, typeof(*ce), ref); + + __guc_context_destroy(ce); +} + +static const struct intel_context_ops virtual_parent_context_ops = { + .alloc = guc_virtual_context_alloc, + + .pre_pin = guc_context_pre_pin, + .pin = guc_parent_context_pin, + .unpin = guc_parent_context_unpin, .post_unpin = guc_context_post_unpin, .ban = guc_context_ban, @@ -2190,6 +3289,110 @@ static const struct intel_context_ops virtual_guc_context_ops = { .get_sibling = guc_virtual_get_sibling, }; +static const struct intel_context_ops virtual_child_context_ops = { + .alloc = guc_virtual_context_alloc, + + .pre_pin = guc_context_pre_pin, + .pin = guc_child_context_pin, + .unpin = guc_child_context_unpin, + .post_unpin = guc_child_context_post_unpin, + + .cancel_request = guc_context_cancel_request, + + .enter = guc_virtual_context_enter, + .exit = guc_virtual_context_exit, + + .destroy = guc_child_context_destroy, + + .get_sibling = guc_virtual_get_sibling, +}; + +/* + * The below override of the breadcrumbs is enabled when the user configures a + * context for parallel submission (multi-lrc, parent-child). + * + * The overridden breadcrumbs implement an algorithm which allows the GuC to + * safely preempt all the hw contexts configured for parallel submission + * between each BB. The contract between the i915 and GuC is that if the parent + * context can be preempted, all the children can be preempted, and the GuC will + * always try to preempt the parent before the children. A handshake between the + * parent / children breadcrumbs ensures the i915 holds up its end of the deal, + * creating a window to preempt between each set of BBs.
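A host-side analogue of the handshake just described, with C11 atomics standing in for the MI_SEMAPHORE_WAIT polls on GGTT scratch (the thread count and busy-waits are illustrative only): each child signals join and spins on go; the parent collects all joins, would disable preemption at that point in the real flow, then releases everyone.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define CHILDREN 3	/* illustrative; the driver bounds children by
			 * MAX_ENGINE_INSTANCE */

static atomic_int join_sem[CHILDREN];	/* children signal, parent polls */
static atomic_int go_sem;		/* parent signals, children poll */

static void *child(void *arg)
{
	int i = (int)(long)arg;

	atomic_store(&join_sem[i], 1);		/* signal parent */
	while (!atomic_load(&go_sem))		/* wait for go */
		;
	printf("child %d runs its batch\n", i);
	return NULL;
}

int main(void)
{
	pthread_t t[CHILDREN];
	long i;

	for (i = 0; i < CHILDREN; i++)
		pthread_create(&t[i], NULL, child, (void *)i);

	for (i = 0; i < CHILDREN; i++)		/* parent waits on children */
		while (!atomic_load(&join_sem[i]))
			;
	/* the real parent disables preemption here */
	atomic_store(&go_sem, 1);		/* tell children go */
	printf("parent runs its batch\n");

	for (i = 0; i < CHILDREN; i++)
		pthread_join(t[i], NULL);
	return 0;
}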
+ */ +static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); +static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); +static u32 * +emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs); +static u32 * +emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs); + +static struct intel_context * +guc_create_parallel(struct intel_engine_cs **engines, + unsigned int num_siblings, + unsigned int width) +{ + struct intel_engine_cs **siblings = NULL; + struct intel_context *parent = NULL, *ce, *err; + int i, j; + + siblings = kmalloc_array(num_siblings, + sizeof(*siblings), + GFP_KERNEL); + if (!siblings) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < width; ++i) { + for (j = 0; j < num_siblings; ++j) + siblings[j] = engines[i * num_siblings + j]; + + ce = intel_engine_create_virtual(siblings, num_siblings, + FORCE_VIRTUAL); + if (IS_ERR(ce)) { + err = ERR_CAST(ce); + goto unwind; + } + + if (i == 0) { + parent = ce; + parent->ops = &virtual_parent_context_ops; + } else { + ce->ops = &virtual_child_context_ops; + intel_context_bind_parent_child(parent, ce); + } + } + + parent->parallel.fence_context = dma_fence_context_alloc(1); + + parent->engine->emit_bb_start = + emit_bb_start_parent_no_preempt_mid_batch; + parent->engine->emit_fini_breadcrumb = + emit_fini_breadcrumb_parent_no_preempt_mid_batch; + parent->engine->emit_fini_breadcrumb_dw = + 12 + 4 * parent->parallel.number_children; + for_each_child(parent, ce) { + ce->engine->emit_bb_start = + emit_bb_start_child_no_preempt_mid_batch; + ce->engine->emit_fini_breadcrumb = + emit_fini_breadcrumb_child_no_preempt_mid_batch; + ce->engine->emit_fini_breadcrumb_dw = 16; + } + + kfree(siblings); + return parent; + +unwind: + if (parent) + intel_context_put(parent); + kfree(siblings); + return err; +} + static bool guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b) { @@ -2249,7 +3452,7 @@ static void guc_init_breadcrumbs(struct intel_engine_cs *engine) static void guc_bump_inflight_request_prio(struct i915_request *rq, int prio) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); u8 new_guc_prio = map_i915_prio_to_guc_prio(prio); /* Short circuit function */ @@ -2259,7 +3462,7 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq, !new_guc_prio_higher(rq->guc_prio, new_guc_prio))) return; - spin_lock(&ce->guc_active.lock); + spin_lock(&ce->guc_state.lock); if (rq->guc_prio != GUC_PRIO_FINI) { if (rq->guc_prio != GUC_PRIO_INIT) sub_context_inflight_prio(ce, rq->guc_prio); @@ -2267,16 +3470,16 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq, add_context_inflight_prio(ce, rq->guc_prio); update_context_prio(ce); } - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); } static void guc_retire_inflight_request_prio(struct i915_request *rq) { - struct intel_context *ce = rq->context; + struct intel_context *ce = request_to_scheduling_context(rq); - spin_lock(&ce->guc_active.lock); + spin_lock(&ce->guc_state.lock); guc_prio_fini(rq, ce); - spin_unlock(&ce->guc_active.lock); + spin_unlock(&ce->guc_state.lock); } static void sanitize_hwsp(struct intel_engine_cs *engine) @@ -2310,6 +3513,8 @@ static void guc_sanitize(struct intel_engine_cs *engine) /* And scrub the dirty cachelines for the HWSP */ clflush_cache_range(engine->status_page.addr, 
PAGE_SIZE); + + intel_engine_reset_pinned_contexts(engine); } static void setup_hwsp(struct intel_engine_cs *engine) @@ -2385,9 +3590,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc) * and even it did this code would be run again. */ - for_each_engine(engine, gt, id) - if (engine->kernel_context) - guc_kernel_context_pin(guc, engine->kernel_context); + for_each_engine(engine, gt, id) { + struct intel_context *ce; + + list_for_each_entry(ce, &engine->pinned_contexts_list, + pinned_contexts_link) + guc_kernel_context_pin(guc, ce); + } } static void guc_release(struct intel_engine_cs *engine) @@ -2433,7 +3642,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen12_emit_flush_xcs; } engine->set_default_submission = guc_set_default_submission; + engine->busyness = guc_engine_busyness; + engine->flags |= I915_ENGINE_SUPPORTS_STATS; engine->flags |= I915_ENGINE_HAS_PREEMPTION; engine->flags |= I915_ENGINE_HAS_TIMESLICES; @@ -2532,6 +3743,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) void intel_guc_submission_enable(struct intel_guc *guc) { guc_init_lrc_mapping(guc); + guc_init_engine_stats(guc); } void intel_guc_submission_disable(struct intel_guc *guc) @@ -2580,13 +3792,13 @@ g2h_context_lookup(struct intel_guc *guc, u32 desc_idx) return NULL; } - return ce; -} + if (unlikely(intel_context_is_child(ce))) { + drm_err(&guc_to_gt(guc)->i915->drm, + "Context is child, desc_idx %u", desc_idx); + return NULL; + } -static void decr_outstanding_submission_g2h(struct intel_guc *guc) -{ - if (atomic_dec_and_test(&guc->outstanding_submission_g2h)) - wake_up_all(&guc->ct.wq); + return ce; } int intel_guc_deregister_done_process_msg(struct intel_guc *guc, @@ -2607,6 +3819,13 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc, trace_intel_context_deregister_done(ce); +#ifdef CONFIG_DRM_I915_SELFTEST + if (unlikely(ce->drop_deregister)) { + ce->drop_deregister = false; + return 0; + } +#endif + if (context_wait_for_deregister_to_register(ce)) { struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm; @@ -2622,6 +3841,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc, intel_context_put(ce); } else if (context_destroyed(ce)) { /* Context has been destroyed */ + intel_gt_pm_put_async(guc_to_gt(guc)); release_guc_id(guc, ce); __guc_context_destroy(ce); } @@ -2652,8 +3872,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, (!context_pending_enable(ce) && !context_pending_disable(ce)))) { drm_err(&guc_to_gt(guc)->i915->drm, - "Bad context sched_state 0x%x, 0x%x, desc_idx %u", - atomic_read(&ce->guc_sched_state_no_lock), + "Bad context sched_state 0x%x, desc_idx %u", ce->guc_state.sched_state, desc_idx); return -EPROTO; } @@ -2661,10 +3880,26 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc, trace_intel_context_sched_done(ce); if (context_pending_enable(ce)) { +#ifdef CONFIG_DRM_I915_SELFTEST + if (unlikely(ce->drop_schedule_enable)) { + ce->drop_schedule_enable = false; + return 0; + } +#endif + + spin_lock_irqsave(&ce->guc_state.lock, flags); clr_context_pending_enable(ce); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); } else if (context_pending_disable(ce)) { bool banned; +#ifdef CONFIG_DRM_I915_SELFTEST + if (unlikely(ce->drop_schedule_disable)) { + ce->drop_schedule_disable = false; + return 0; + } +#endif + /* * Unpin must be done before __guc_signal_context_fence, * otherwise a race exists between the requests getting @@ -2721,7 +3956,12 @@ static 
void guc_handle_context_reset(struct intel_guc *guc, { trace_intel_context_reset(ce); - if (likely(!intel_context_is_banned(ce))) { + /* + * XXX: Racey if request cancellation has occurred, see comment in + * __guc_reset_context(). + */ + if (likely(!intel_context_is_banned(ce) && + !context_blocked(ce))) { capture_error_state(guc, ce); guc_context_replay(ce); } @@ -2731,6 +3971,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) { struct intel_context *ce; + unsigned long flags; int desc_idx; if (unlikely(len != 1)) { @@ -2739,11 +3980,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, } desc_idx = msg[0]; + + /* + * The context lookup uses the xarray but lookups only require an RCU lock + * not the full spinlock. So take the lock explicitly and keep it until the + * context has been reference count locked to ensure it can't be destroyed + * asynchronously until the reset is done. + */ + xa_lock_irqsave(&guc->context_lookup, flags); ce = g2h_context_lookup(guc, desc_idx); + if (ce) + intel_context_get(ce); + xa_unlock_irqrestore(&guc->context_lookup, flags); + if (unlikely(!ce)) return -EPROTO; guc_handle_context_reset(guc, ce); + intel_context_put(ce); return 0; } @@ -2797,33 +4051,47 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine) struct intel_context *ce; struct i915_request *rq; unsigned long index; + unsigned long flags; /* Reset called during driver load? GuC not yet initialised! */ if (unlikely(!guc_submission_initialized(guc))) return; + xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - if (!intel_context_is_pinned(ce)) + if (!kref_get_unless_zero(&ce->ref)) continue; + xa_unlock(&guc->context_lookup); + + if (!intel_context_is_pinned(ce)) + goto next; + if (intel_engine_is_virtual(ce->engine)) { if (!(ce->engine->mask & engine->mask)) - continue; + goto next; } else { if (ce->engine != engine) - continue; + goto next; } - list_for_each_entry(rq, &ce->guc_active.requests, sched.link) { + list_for_each_entry(rq, &ce->guc_state.requests, sched.link) { if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE) continue; intel_engine_set_hung_context(engine, ce); /* Can only cope with one hang at a time... 
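The lookup paths above now pin the context with a reference while the xarray lock is held, so it cannot be freed asynchronously once the lock is dropped. The shape of that pattern, with refcounting reduced to a plain int for brevity (not the kernel's kref API):

#include <stdio.h>

/* Sketch of lookup-then-pin: take the object's reference while the
 * lookup lock is held, drop the lock for work that may sleep, then
 * re-take it to keep iterating. */
struct toy_ctx {
	int ref;
	int id;
};

static int toy_kref_get_unless_zero(struct toy_ctx *ce)
{
	if (ce->ref == 0)
		return 0;	/* object already on its way out: skip */
	ce->ref++;
	return 1;
}

int main(void)
{
	struct toy_ctx table[] = { { 1, 0 }, { 0, 1 }, { 2, 2 } };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		/* xa_lock would be held here */
		if (!toy_kref_get_unless_zero(&table[i]))
			continue;
		/* xa_unlock: safe to sleep while we hold a reference */
		printf("inspect context %d\n", table[i].id);
		table[i].ref--;	/* put */
		/* xa_lock re-taken before the next iteration */
	}
	return 0;
}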
*/ - return; + intel_context_put(ce); + xa_lock(&guc->context_lookup); + goto done; } +next: + intel_context_put(ce); + xa_lock(&guc->context_lookup); } +done: + xa_unlock_irqrestore(&guc->context_lookup, flags); } void intel_guc_dump_active_requests(struct intel_engine_cs *engine, @@ -2839,23 +4107,34 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine, if (unlikely(!guc_submission_initialized(guc))) return; + xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - if (!intel_context_is_pinned(ce)) + if (!kref_get_unless_zero(&ce->ref)) continue; + xa_unlock(&guc->context_lookup); + + if (!intel_context_is_pinned(ce)) + goto next; + if (intel_engine_is_virtual(ce->engine)) { if (!(ce->engine->mask & engine->mask)) - continue; + goto next; } else { if (ce->engine != engine) - continue; + goto next; } - spin_lock_irqsave(&ce->guc_active.lock, flags); - intel_engine_dump_active_requests(&ce->guc_active.requests, + spin_lock(&ce->guc_state.lock); + intel_engine_dump_active_requests(&ce->guc_state.requests, hung_rq, m); - spin_unlock_irqrestore(&ce->guc_active.lock, flags); + spin_unlock(&ce->guc_state.lock); + +next: + intel_context_put(ce); + xa_lock(&guc->context_lookup); } + xa_unlock_irqrestore(&guc->context_lookup, flags); } void intel_guc_submission_print_info(struct intel_guc *guc, @@ -2881,7 +4160,7 @@ void intel_guc_submission_print_info(struct intel_guc *guc, priolist_for_each_request(rq, pl) drm_printf(p, "guc_id=%u, seqno=%llu\n", - rq->context->guc_id, + rq->context->guc_id.id, rq->fence.seqno); } spin_unlock_irqrestore(&sched_engine->lock, flags); @@ -2893,46 +4172,348 @@ static inline void guc_log_context_priority(struct drm_printer *p, { int i; - drm_printf(p, "\t\tPriority: %d\n", - ce->guc_prio); + drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio); drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n"); for (i = GUC_CLIENT_PRIORITY_KMD_HIGH; i < GUC_CLIENT_PRIORITY_NUM; ++i) { drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n", - i, ce->guc_prio_count[i]); + i, ce->guc_state.prio_count[i]); } drm_printf(p, "\n"); } +static inline void guc_log_context(struct drm_printer *p, + struct intel_context *ce) +{ + drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id); + drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); + drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", + ce->ring->head, + ce->lrc_reg_state[CTX_RING_HEAD]); + drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n", + ce->ring->tail, + ce->lrc_reg_state[CTX_RING_TAIL]); + drm_printf(p, "\t\tContext Pin Count: %u\n", + atomic_read(&ce->pin_count)); + drm_printf(p, "\t\tGuC ID Ref Count: %u\n", + atomic_read(&ce->guc_id.ref)); + drm_printf(p, "\t\tSchedule State: 0x%x\n\n", + ce->guc_state.sched_state); +} + void intel_guc_submission_print_context_info(struct intel_guc *guc, struct drm_printer *p) { struct intel_context *ce; unsigned long index; + unsigned long flags; + xa_lock_irqsave(&guc->context_lookup, flags); xa_for_each(&guc->context_lookup, index, ce) { - drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id); - drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); - drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", - ce->ring->head, - ce->lrc_reg_state[CTX_RING_HEAD]); - drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n", - ce->ring->tail, - ce->lrc_reg_state[CTX_RING_TAIL]); - drm_printf(p, "\t\tContext Pin Count: %u\n", - atomic_read(&ce->pin_count)); - drm_printf(p, "\t\tGuC ID Ref 
Count: %u\n", - atomic_read(&ce->guc_id_ref)); - drm_printf(p, "\t\tSchedule State: 0x%x, 0x%x\n\n", - ce->guc_state.sched_state, - atomic_read(&ce->guc_sched_state_no_lock)); + GEM_BUG_ON(intel_context_is_child(ce)); + guc_log_context(p, ce); guc_log_context_priority(p, ce); + + if (intel_context_is_parent(ce)) { + struct guc_process_desc *desc = __get_process_desc(ce); + struct intel_context *child; + + drm_printf(p, "\t\tNumber children: %u\n", + ce->parallel.number_children); + drm_printf(p, "\t\tWQI Head: %u\n", + READ_ONCE(desc->head)); + drm_printf(p, "\t\tWQI Tail: %u\n", + READ_ONCE(desc->tail)); + drm_printf(p, "\t\tWQI Status: %u\n\n", + READ_ONCE(desc->wq_status)); + + if (ce->engine->emit_bb_start == + emit_bb_start_parent_no_preempt_mid_batch) { + u8 i; + + drm_printf(p, "\t\tChildren Go: %u\n\n", + get_children_go_value(ce)); + for (i = 0; i < ce->parallel.number_children; ++i) + drm_printf(p, "\t\tChildren Join: %u\n", + get_children_join_value(ce, i)); + } + + for_each_child(ce, child) + guc_log_context(p, child); + } + } + xa_unlock_irqrestore(&guc->context_lookup, flags); +} + +static inline u32 get_children_go_addr(struct intel_context *ce) +{ + GEM_BUG_ON(!intel_context_is_parent(ce)); + + return i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce) + + offsetof(struct parent_scratch, go.semaphore); +} + +static inline u32 get_children_join_addr(struct intel_context *ce, + u8 child_index) +{ + GEM_BUG_ON(!intel_context_is_parent(ce)); + + return i915_ggtt_offset(ce->state) + + __get_parent_scratch_offset(ce) + + offsetof(struct parent_scratch, join[child_index].semaphore); +} + +#define PARENT_GO_BB 1 +#define PARENT_GO_FINI_BREADCRUMB 0 +#define CHILD_GO_BB 1 +#define CHILD_GO_FINI_BREADCRUMB 0 +static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + struct intel_context *ce = rq->context; + u32 *cs; + u8 i; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Wait on children */ + for (i = 0; i < ce->parallel.number_children; ++i) { + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD); + *cs++ = PARENT_GO_BB; + *cs++ = get_children_join_addr(ce, i); + *cs++ = 0; + } + + /* Turn off preemption */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + *cs++ = MI_NOOP; + + /* Tell children go */ + cs = gen8_emit_ggtt_write(cs, + CHILD_GO_BB, + get_children_go_addr(ce), + 0); + + /* Jump to batch */ + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 
0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} + +static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + struct intel_context *ce = rq->context; + struct intel_context *parent = intel_context_to_parent(ce); + u32 *cs; + + GEM_BUG_ON(!intel_context_is_child(ce)); + + cs = intel_ring_begin(rq, 12); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Signal parent */ + cs = gen8_emit_ggtt_write(cs, + PARENT_GO_BB, + get_children_join_addr(parent, + ce->parallel.child_index), + 0); + + /* Wait on parent for go */ + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD); + *cs++ = CHILD_GO_BB; + *cs++ = get_children_go_addr(parent); + *cs++ = 0; + + /* Turn off preemption */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + /* Jump to batch */ + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + + intel_ring_advance(rq, cs); + + return 0; +} + +static u32 * +__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs) +{ + struct intel_context *ce = rq->context; + u8 i; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + /* Wait on children */ + for (i = 0; i < ce->parallel.number_children; ++i) { + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD); + *cs++ = PARENT_GO_FINI_BREADCRUMB; + *cs++ = get_children_join_addr(ce, i); + *cs++ = 0; + } + + /* Turn on preemption */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_NOOP; + + /* Tell children go */ + cs = gen8_emit_ggtt_write(cs, + CHILD_GO_FINI_BREADCRUMB, + get_children_go_addr(ce), + 0); + + return cs; +} + +/* + * If this is true, a submission of multi-lrc requests had an error and the + * requests need to be skipped. The front end (execbuf IOCTL) should've called + * i915_request_skip which squashes the BB but we still need to emit the fini + * breadcrumb seqno write. At this point we don't know how many of the + * requests in the multi-lrc submission were generated so we can't do the + * handshake between the parent and children (e.g. if 4 requests should be + * generated but the 2nd hit an error, only 1 would be seen by the GuC backend). + * Simply skip the handshake, but still emit the breadcrumb seqno, if an error + * has occurred on any of the requests in submission / relationship. + */ +static inline bool skip_handshake(struct i915_request *rq) +{ + return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags); +} + +static u32 * +emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq, + u32 *cs) +{ + struct intel_context *ce = rq->context; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + if (unlikely(skip_handshake(rq))) { + /* + * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch, + * the -6 comes from the length of the emits below.
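A worked check of that accounting, using the budgets guc_create_parallel() installs above (parent fini breadcrumb = 12 + 4 dwords per child, child = 16 dwords): the trailing 6 dwords are the 4-dword seqno write plus MI_USER_INTERRUPT and MI_NOOP, which must be emitted even when the handshake is skipped.

#include <stdio.h>

int main(void)
{
	/* Parent: 12 fixed dwords + one 4-dword semaphore wait per child;
	 * child: 16 dwords. The final 6 (seqno write + user interrupt +
	 * noop) survive skip_handshake(); the rest gets NOPed. */
	unsigned int children;

	for (children = 1; children <= 3; children++) {
		unsigned int parent_dw = 12 + 4 * children;

		printf("%u children: parent %u dw (%u handshake + 6 tail), "
		       "child 16 dw (10 handshake + 6 tail)\n",
		       children, parent_dw, parent_dw - 6);
	}
	return 0;
}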
+
+static u32 *
+emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+                                                 u32 *cs)
+{
+    struct intel_context *ce = rq->context;
+
+    GEM_BUG_ON(!intel_context_is_parent(ce));
+
+    if (unlikely(skip_handshake(rq))) {
+        /*
+         * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
+         * the -6 comes from the length of the emits below.
+         */
+        memset(cs, 0, sizeof(u32) *
+               (ce->engine->emit_fini_breadcrumb_dw - 6));
+        cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+    } else {
+        cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
    }
+
+    /* Emit fini breadcrumb */
+    cs = gen8_emit_ggtt_write(cs,
+                              rq->fence.seqno,
+                              i915_request_active_timeline(rq)->hwsp_offset,
+                              0);
+
+    /* User interrupt */
+    *cs++ = MI_USER_INTERRUPT;
+    *cs++ = MI_NOOP;
+
+    rq->tail = intel_ring_offset(rq, cs);
+
+    return cs;
+}
+
+static u32 *
+__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+                                                  u32 *cs)
+{
+    struct intel_context *ce = rq->context;
+    struct intel_context *parent = intel_context_to_parent(ce);
+
+    GEM_BUG_ON(!intel_context_is_child(ce));
+
+    /* Turn on preemption */
+    *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+    *cs++ = MI_NOOP;
+
+    /* Signal parent */
+    cs = gen8_emit_ggtt_write(cs,
+                              PARENT_GO_FINI_BREADCRUMB,
+                              get_children_join_addr(parent,
+                                                     ce->parallel.child_index),
+                              0);
+
+    /* Wait on parent for go */
+    *cs++ = (MI_SEMAPHORE_WAIT |
+             MI_SEMAPHORE_GLOBAL_GTT |
+             MI_SEMAPHORE_POLL |
+             MI_SEMAPHORE_SAD_EQ_SDD);
+    *cs++ = CHILD_GO_FINI_BREADCRUMB;
+    *cs++ = get_children_go_addr(parent);
+    *cs++ = 0;
+
+    return cs;
+}
+
+static u32 *
+emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+                                                u32 *cs)
+{
+    struct intel_context *ce = rq->context;
+
+    GEM_BUG_ON(!intel_context_is_child(ce));
+
+    if (unlikely(skip_handshake(rq))) {
+        /*
+         * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
+         * the -6 comes from the length of the emits below.
+         */
+        memset(cs, 0, sizeof(u32) *
+               (ce->engine->emit_fini_breadcrumb_dw - 6));
+        cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+    } else {
+        cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
+    }
+
+    /* Emit fini breadcrumb */
+    cs = gen8_emit_ggtt_write(cs,
+                              rq->fence.seqno,
+                              i915_request_active_timeline(rq)->hwsp_offset,
+                              0);
+
+    /* User interrupt */
+    *cs++ = MI_USER_INTERRUPT;
+    *cs++ = MI_NOOP;
+
+    rq->tail = intel_ring_offset(rq, cs);
+
+    return cs;
 }
 
 static struct intel_context *
-guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+                   unsigned long flags)
 {
     struct guc_virtual_engine *ve;
     struct intel_guc *guc;
@@ -2981,6 +4562,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
     }
 
     ve->base.mask |= sibling->mask;
+    ve->base.logical_mask |= sibling->logical_mask;
 
     if (n != 0 && ve->base.class != sibling->class) {
         DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
@@ -3036,3 +4618,8 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
 
     return false;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_guc.c"
+#include "selftest_guc_multi_lrc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index c7ef44fa0c36..5a95a9f0a8e3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
 void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
                                     struct i915_request *hung_rq,
                                     struct drm_printer *m);
+void intel_guc_busyness_park(struct intel_gt *gt);
+void intel_guc_busyness_unpark(struct intel_gt *gt);
 
 bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
diff --git
a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index fc5387b410a2..ff4b6869b80b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -87,17 +87,25 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) vma->obj, true)); if (IS_ERR(vaddr)) { i915_vma_unpin_and_release(&vma, 0); - return PTR_ERR(vaddr); + err = PTR_ERR(vaddr); + goto unpin_out; } copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size); - GEM_BUG_ON(copied < huc->fw.rsa_size); - i915_gem_object_unpin_map(vma->obj); + if (copied < huc->fw.rsa_size) { + err = -ENOMEM; + goto unpin_out; + } + huc->rsa_data = vma; return 0; + +unpin_out: + i915_vma_unpin_and_release(&vma, 0); + return err; } static void intel_huc_rsa_data_destroy(struct intel_huc *huc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c index 5733c15fd123..15998963b863 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c @@ -5,7 +5,7 @@ #include <drm/drm_print.h> -#include "gt/debugfs_gt.h" +#include "gt/intel_gt_debugfs.h" #include "intel_huc.h" #include "intel_huc_debugfs.h" @@ -21,11 +21,11 @@ static int huc_info_show(struct seq_file *m, void *data) return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(huc_info); void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root) { - static const struct debugfs_gt_file files[] = { + static const struct intel_gt_debugfs_file files[] = { { "huc_info", &huc_info_fops, NULL }, }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index b104fb7607eb..8f17005ce85f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -35,7 +35,7 @@ static void uc_expand_default_options(struct intel_uc *uc) } /* Intermediate platforms are HuC authentication only */ - if (IS_DG1(i915) || IS_ALDERLAKE_S(i915)) { + if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) { i915->params.enable_guc = ENABLE_GUC_LOAD_HUC; return; } @@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc) __uc_free_load_err_log(uc); } -static inline bool guc_communication_enabled(struct intel_guc *guc) -{ - return intel_guc_ct_enabled(&guc->ct); -} - /* * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 * register using the same bits used in the CT message payload. 
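To give the comment interrupted here its context: while the CT buffers are disabled, incoming GuC events are mirrored into SCRATCH_15 and stashed in guc->mmio_msg, and guc_handle_mmio_msg() replays them once communication is back up. A sketch of that replay under the same lock, assuming intel_guc_to_host_process_recv_msg() is the handler the CT path also uses:

    /* Sketch: replay a stashed MMIO-delivered event once CT is enabled. */
    spin_lock_irq(&guc->irq_lock);
    if (guc->mmio_msg) {
        intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
        guc->mmio_msg = 0;
    }
    spin_unlock_irq(&guc->irq_lock);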
Since our @@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc) static void guc_handle_mmio_msg(struct intel_guc *guc) { /* we need communication to be enabled to reply to GuC */ - GEM_BUG_ON(!guc_communication_enabled(guc)); + GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct)); spin_lock_irq(&guc->irq_lock); if (guc->mmio_msg) { @@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc) struct drm_i915_private *i915 = gt->i915; int ret; - GEM_BUG_ON(guc_communication_enabled(guc)); + GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct)); ret = i915_inject_probe_error(i915, -ENXIO); if (ret) @@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication) return 0; /* Make sure we enable communication if and only if it's disabled */ - GEM_BUG_ON(enable_communication == guc_communication_enabled(guc)); + GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct)); if (enable_communication) guc_enable_communication(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c index 089d98662f46..c2f7924295e7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c @@ -6,7 +6,7 @@ #include <linux/debugfs.h> #include <drm/drm_print.h> -#include "gt/debugfs_gt.h" +#include "gt/intel_gt_debugfs.h" #include "intel_guc_debugfs.h" #include "intel_huc_debugfs.h" #include "intel_uc.h" @@ -32,11 +32,11 @@ static int uc_usage_show(struct seq_file *m, void *data) return 0; } -DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage); +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(uc_usage); void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root) { - static const struct debugfs_gt_file files[] = { + static const struct intel_gt_debugfs_file files[] = { { "usage", &uc_usage_fops, NULL }, }; struct dentry *root; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 3a16d08608a5..3aa87be4f2e4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -7,6 +7,7 @@ #include <linux/firmware.h> #include <drm/drm_print.h> +#include "gem/i915_gem_lmem.h" #include "intel_uc_fw.h" #include "intel_uc_fw_abi.h" #include "i915_drv.h" @@ -50,6 +51,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3), huc_def(tgl, 7, 9, 3)) \ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \ + fw_def(DG1, 0, guc_def(dg1, 62, 0, 0), huc_def(dg1, 7, 9, 3)) \ fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \ fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \ fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \ @@ -370,7 +372,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) uc_fw->private_data_size = css->private_data_size; - obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size); + if (HAS_LMEM(i915)) { + obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size); + if (!IS_ERR(obj)) + obj->flags |= I915_BO_ALLOC_PM_EARLY; + } else { + obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size); + } + if (IS_ERR(obj)) { err = PTR_ERR(obj); goto fail; @@ -413,20 +422,25 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw) { struct drm_i915_gem_object *obj = uc_fw->obj; struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt; - 
struct i915_vma dummy = {
-        .node.start = uc_fw_ggtt_offset(uc_fw),
-        .node.size = obj->base.size,
-        .pages = obj->mm.pages,
-        .vm = &ggtt->vm,
-    };
+    struct i915_vma *dummy = &uc_fw->dummy;
+    u32 pte_flags = 0;
+
+    dummy->node.start = uc_fw_ggtt_offset(uc_fw);
+    dummy->node.size = obj->base.size;
+    dummy->pages = obj->mm.pages;
+    dummy->vm = &ggtt->vm;
 
     GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-    GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+    GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);
 
     /* uc_fw->obj cache domains were not controlled across suspend */
-    drm_clflush_sg(dummy.pages);
+    if (i915_gem_object_has_struct_page(obj))
+        drm_clflush_sg(dummy->pages);
 
-    ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+    if (i915_gem_object_is_lmem(obj))
+        pte_flags |= PTE_LM;
+
+    ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
 }
 
 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
@@ -585,13 +599,68 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
  */
 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 {
-    struct sg_table *pages = uc_fw->obj->mm.pages;
+    struct intel_memory_region *mr = uc_fw->obj->mm.region;
     u32 size = min_t(u32, uc_fw->rsa_size, max_len);
     u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
+    struct sgt_iter iter;
+    size_t count = 0;
+    int idx;
 
+    /* Called during reset handling, must be atomic [no fs_reclaim] */
     GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
 
-    return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+    idx = offset >> PAGE_SHIFT;
+    offset = offset_in_page(offset);
+    if (i915_gem_object_has_struct_page(uc_fw->obj)) {
+        struct page *page;
+
+        for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
+            u32 len = min_t(u32, size, PAGE_SIZE - offset);
+            void *vaddr;
+
+            if (idx > 0) {
+                idx--;
+                continue;
+            }
+
+            vaddr = kmap_atomic(page);
+            memcpy(dst, vaddr + offset, len);
+            kunmap_atomic(vaddr);
+
+            offset = 0;
+            dst += len;
+            size -= len;
+            count += len;
+            if (!size)
+                break;
+        }
+    } else {
+        dma_addr_t addr;
+
+        for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
+            u32 len = min_t(u32, size, PAGE_SIZE - offset);
+            void __iomem *vaddr;
+
+            if (idx > 0) {
+                idx--;
+                continue;
+            }
+
+            vaddr = io_mapping_map_atomic_wc(&mr->iomap,
+                                             addr - mr->region.start);
+            memcpy_fromio(dst, vaddr + offset, len);
+            io_mapping_unmap_atomic(vaddr);
+
+            offset = 0;
+            dst += len;
+            size -= len;
+            count += len;
+            if (!size)
+                break;
+        }
+    }
+
+    return count;
+}
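The reworked intel_uc_fw_copy_rsa() above has to handle firmware objects in device-local memory, which have no struct pages to kmap, so the second leg walks DMA addresses through the region's io_mapping instead; both legs share the same chunking scheme. A stripped-down restatement of that scheme (hypothetical helper name) to make the idx/offset bookkeeping explicit:

    /* Sketch: skip `idx` whole pages, then drain `size` bytes page by page;
     * only the first copied page starts at a non-zero offset. */
    static size_t copy_rsa_chunks(struct sg_table *sgt, void *dst,
                                  pgoff_t idx, u32 offset, u32 size)
    {
        struct sgt_iter iter;
        struct page *page;
        size_t count = 0;

        for_each_sgt_page(page, iter, sgt) {
            u32 len = min_t(u32, size, PAGE_SIZE - offset);
            void *vaddr;

            if (idx > 0) {          /* still before the RSA blob */
                idx--;
                continue;
            }

            vaddr = kmap_atomic(page);      /* atomic: reset path */
            memcpy(dst, vaddr + offset, len);
            kunmap_atomic(vaddr);

            offset = 0;
            dst += len;
            size -= len;
            count += len;
            if (!size)
                break;
        }
        return count;
    }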
 
 /**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 99bb1fe1af66..1e00bf65639e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -10,6 +10,7 @@
 #include "intel_uc_fw_abi.h"
 #include "intel_device_info.h"
 #include "i915_gem.h"
+#include "i915_vma.h"
 
 struct drm_printer;
 struct drm_i915_private;
@@ -75,6 +76,14 @@ struct intel_uc_fw {
     bool user_overridden;
     size_t size;
     struct drm_i915_gem_object *obj;
+    /**
+     * @dummy: A vma used in binding the uc fw to ggtt. We can't define this
+     * vma on the stack as it can lead to a stack overflow, so we define it
+     * here. Safe to have 1 copy per uc fw because the binding is single
+     * threaded as it is done during driver load (inherently single threaded)
+     * or during a GT reset (mutex guarantees single threaded).
+     */
+    struct i915_vma dummy;
 
     /*
      * The firmware build process will generate a version header file with major and
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
new file mode 100644
index 000000000000..fb0e4a7bd8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "selftests/intel_scheduler_helpers.h"
+
+static struct i915_request *nop_user_request(struct intel_context *ce,
+                                             struct i915_request *from)
+{
+    struct i915_request *rq;
+    int ret;
+
+    rq = intel_context_create_request(ce);
+    if (IS_ERR(rq))
+        return rq;
+
+    if (from) {
+        ret = i915_sw_fence_await_dma_fence(&rq->submit,
+                                            &from->fence, 0,
+                                            I915_FENCE_GFP);
+        if (ret < 0) {
+            i915_request_put(rq);
+            return ERR_PTR(ret);
+        }
+    }
+
+    i915_request_get(rq);
+    i915_request_add(rq);
+
+    return rq;
+}
+
+static int intel_guc_scrub_ctbs(void *arg)
+{
+    struct intel_gt *gt = arg;
+    int ret = 0;
+    int i;
+    struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
+    intel_wakeref_t wakeref;
+    struct intel_engine_cs *engine;
+    struct intel_context *ce;
+
+    wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+    engine = intel_selftest_find_any_engine(gt);
+
+    /* Submit requests and inject errors forcing G2H to be dropped */
+    for (i = 0; i < 3; ++i) {
+        ce = intel_context_create(engine);
+        if (IS_ERR(ce)) {
+            ret = PTR_ERR(ce);
+            pr_err("Failed to create context, %d: %d\n", i, ret);
+            goto err;
+        }
+
+        switch (i) {
+        case 0:
+            ce->drop_schedule_enable = true;
+            break;
+        case 1:
+            ce->drop_schedule_disable = true;
+            break;
+        case 2:
+            ce->drop_deregister = true;
+            break;
+        }
+
+        rq = nop_user_request(ce, NULL);
+        intel_context_put(ce);
+
+        if (IS_ERR(rq)) {
+            ret = PTR_ERR(rq);
+            pr_err("Failed to create request, %d: %d\n", i, ret);
+            goto err;
+        }
+
+        last[i] = rq;
+    }
+
+    for (i = 0; i < 3; ++i) {
+        ret = i915_request_wait(last[i], 0, HZ);
+        if (ret < 0) {
+            pr_err("Last request failed to complete: %d\n", ret);
+            goto err;
+        }
+        i915_request_put(last[i]);
+        last[i] = NULL;
+    }
+
+    /* Force all H2G / G2H to be submitted / processed */
+    intel_gt_retire_requests(gt);
+    msleep(500);
+
+    /* Scrub missing G2H */
+    intel_gt_handle_error(engine->gt, -1, 0, "selftest reset");
+
+    /* GT will not idle if G2H are lost */
+    ret = intel_gt_wait_for_idle(gt, HZ);
+    if (ret < 0) {
+        pr_err("GT failed to idle: %d\n", ret);
+        goto err;
+    }
+
+err:
+    for (i = 0; i < 3; ++i)
+        if (last[i])
+            i915_request_put(last[i]);
+    intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+    return ret;
+}
+
+int intel_guc_live_selftests(struct drm_i915_private *i915)
+{
+    static const struct i915_subtest tests[] = {
+        SUBTEST(intel_guc_scrub_ctbs),
+    };
+    struct intel_gt *gt = &i915->gt;
+
+    if (intel_gt_is_wedged(gt))
+        return 0;
+
+    if (!intel_uc_uses_guc_submission(&gt->uc))
+        return 0;
+
+    return intel_gt_live_subtests(tests, gt);
+}
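intel_guc_scrub_ctbs() above is the template for the G2H-loss coverage in this series: each context deliberately drops one class of GuC-to-host message (the drop_* flags are fault-injection hooks added elsewhere in the series), the requests still complete, and then only a full GT reset can scrub the stuck context state and let the GT idle. Reduced to its skeleton (error handling elided, helpers as in the test above):

    ce = intel_context_create(engine);
    ce->drop_deregister = true;               /* drop exactly one G2H */
    rq = nop_user_request(ce, NULL);
    intel_context_put(ce);
    i915_request_wait(rq, 0, HZ);             /* the request itself completes */
    intel_gt_handle_error(gt, -1, 0, "selftest reset");  /* scrub lost G2H */
    ret = intel_gt_wait_for_idle(gt, HZ);     /* must now succeed */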
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
new file mode 100644
index 000000000000..50953c8e8b53
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+static void logical_sort(struct intel_engine_cs **engines, int num_engines)
+{
+    struct intel_engine_cs *sorted[MAX_ENGINE_INSTANCE + 1];
+    int i, j;
+
+    for (i = 0; i < num_engines; ++i)
+        for (j = 0; j < MAX_ENGINE_INSTANCE + 1; ++j) {
+            if (engines[j]->logical_mask & BIT(i)) {
+                sorted[i] = engines[j];
+                break;
+            }
+        }
+
+    memcpy(*engines, *sorted,
+           sizeof(struct intel_engine_cs *) * num_engines);
+}
+
+static struct intel_context *
+multi_lrc_create_parent(struct intel_gt *gt, u8 class,
+                        unsigned long flags)
+{
+    struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+    struct intel_engine_cs *engine;
+    enum intel_engine_id id;
+    int i = 0;
+
+    for_each_engine(engine, gt, id) {
+        if (engine->class != class)
+            continue;
+
+        siblings[i++] = engine;
+    }
+
+    if (i <= 1)
+        return ERR_PTR(0);
+
+    logical_sort(siblings, i);
+
+    return intel_engine_create_parallel(siblings, 1, i);
+}
+
+static void multi_lrc_context_unpin(struct intel_context *ce)
+{
+    struct intel_context *child;
+
+    GEM_BUG_ON(!intel_context_is_parent(ce));
+
+    for_each_child(ce, child)
+        intel_context_unpin(child);
+    intel_context_unpin(ce);
+}
+
+static void multi_lrc_context_put(struct intel_context *ce)
+{
+    GEM_BUG_ON(!intel_context_is_parent(ce));
+
+    /*
+     * Only the parent gets the creation ref put in the uAPI, the parent
+     * itself is responsible for creation ref put on the children.
+     */
+    intel_context_put(ce);
+}
+
+static struct i915_request *
+multi_lrc_nop_request(struct intel_context *ce)
+{
+    struct intel_context *child;
+    struct i915_request *rq, *child_rq;
+    int i = 0;
+
+    GEM_BUG_ON(!intel_context_is_parent(ce));
+
+    rq = intel_context_create_request(ce);
+    if (IS_ERR(rq))
+        return rq;
+
+    i915_request_get(rq);
+    i915_request_add(rq);
+
+    for_each_child(ce, child) {
+        child_rq = intel_context_create_request(child);
+        if (IS_ERR(child_rq))
+            goto child_error;
+
+        if (++i == ce->parallel.number_children)
+            set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
+                    &child_rq->fence.flags);
+        i915_request_add(child_rq);
+    }
+
+    return rq;
+
+child_error:
+    i915_request_put(rq);
+
+    return ERR_PTR(-ENOMEM);
+}
+
+static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
+{
+    struct intel_context *parent;
+    struct i915_request *rq;
+    int ret;
+
+    parent = multi_lrc_create_parent(gt, class, 0);
+    if (IS_ERR(parent)) {
+        pr_err("Failed creating contexts: %ld", PTR_ERR(parent));
+        return PTR_ERR(parent);
+    } else if (!parent) {
+        pr_debug("Not enough engines in class: %d", class);
+        return 0;
+    }
+
+    rq = multi_lrc_nop_request(parent);
+    if (IS_ERR(rq)) {
+        ret = PTR_ERR(rq);
+        pr_err("Failed creating requests: %d", ret);
+        goto out;
+    }
+
+    ret = intel_selftest_wait_for_rq(rq);
+    if (ret)
+        pr_err("Failed waiting on request: %d", ret);
+
+    i915_request_put(rq);
+
+    if (ret >= 0) {
+        ret = intel_gt_wait_for_idle(gt, HZ * 5);
+        if (ret < 0)
+            pr_err("GT failed to idle: %d\n", ret);
+    }
+
+out:
+    multi_lrc_context_unpin(parent);
+    multi_lrc_context_put(parent);
+    return ret;
+}
+
+static int intel_guc_multi_lrc_basic(void *arg)
+{
+    struct intel_gt *gt = arg;
+    unsigned int class;
+    int ret;
+
+    for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+        ret = __intel_guc_multi_lrc_basic(gt, class);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
+int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915)
+{
+    static const struct i915_subtest tests[] = {
+        SUBTEST(intel_guc_multi_lrc_basic),
+    };
+    struct intel_gt *gt = &i915->gt;
+
+    if (intel_gt_is_wedged(gt))
+        return 0;
+
+    if (!intel_uc_uses_guc_submission(&gt->uc))
+        return
0; + + return intel_gt_live_subtests(tests, gt); +} diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 11a8baba6822..9ec064199364 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -427,7 +427,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->tiled = !!(val & SPRITE_TILED); color_order = !!(val & SPRITE_RGB_ORDER_RGBX); - yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >> + yuv_order = (val & SPRITE_YUV_ORDER_MASK) >> _SPRITE_YUV_ORDER_SHIFT; fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index e5c2fdfc20e3..99d1781fa5f0 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -446,17 +446,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) return (e->val64 != 0); else - return (e->val64 & _PAGE_PRESENT); + return (e->val64 & GEN8_PAGE_PRESENT); } static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) { - e->val64 &= ~_PAGE_PRESENT; + e->val64 &= ~GEN8_PAGE_PRESENT; } static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) { - e->val64 |= _PAGE_PRESENT; + e->val64 |= GEN8_PAGE_PRESENT; } static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e) @@ -745,7 +745,7 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096, - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); @@ -849,7 +849,7 @@ retry: */ spt->shadow_page.type = type; daddr = dma_map_page(kdev, spt->shadow_page.page, - 0, 4096, PCI_DMA_BIDIRECTIONAL); + 0, 4096, DMA_BIDIRECTIONAL); if (dma_mapping_error(kdev, daddr)) { gvt_vgpu_err("fail to map dma addr\n"); ret = -EINVAL; @@ -865,7 +865,7 @@ retry: return spt; err_unmap_dma: - dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL); err_free_spt: free_spt(spt); return ERR_PTR(ret); @@ -2409,8 +2409,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, return -ENOMEM; } - daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, - 4096, PCI_DMA_BIDIRECTIONAL); + daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, daddr)) { gvt_vgpu_err("fail to dmamap scratch_pt\n"); __free_page(virt_to_page(scratch_pt)); @@ -2440,7 +2439,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, /* The entry parameters like present/writeable/cache type * set to the same as i915's scratch page tree. 
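This run of gvt hunks replaces the CPU page-table macros _PAGE_PRESENT/_PAGE_RW with the GPU-specific GEN8_PAGE_* bits (and PCI_DMA_BIDIRECTIONAL with the bus-agnostic DMA_BIDIRECTIONAL), so the shadow PTE layout no longer leans on x86 defines. Composing the scratch PTE that the interrupted comment describes then looks like this sketch, built from the lines just below:

    u64 val = (u64)page_mfn << I915_GTT_PAGE_SHIFT;

    val |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;   /* GPU bits, not _PAGE_* */
    if (type == GTT_TYPE_PPGTT_PDE_PT)
        val |= PPAT_CACHED;                    /* mirror i915's scratch tree */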
*/ - se.val64 |= _PAGE_PRESENT | _PAGE_RW; + se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (type == GTT_TYPE_PPGTT_PDE_PT) se.val64 |= PPAT_CACHED; @@ -2461,7 +2460,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu) if (vgpu->gtt.scratch_pt[i].page != NULL) { daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << I915_GTT_PAGE_SHIFT); - dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); + dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL); __free_page(vgpu->gtt.scratch_pt[i].page); vgpu->gtt.scratch_pt[i].page = NULL; vgpu->gtt.scratch_pt[i].page_mfn = 0; @@ -2741,7 +2740,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) } daddr = dma_map_page(dev, virt_to_page(page), 0, - 4096, PCI_DMA_BIDIRECTIONAL); + 4096, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, daddr)) { gvt_err("fail to dmamap scratch ggtt page\n"); __free_page(virt_to_page(page)); @@ -2755,7 +2754,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) ret = setup_spt_oos(gvt); if (ret) { gvt_err("fail to initialize SPT oos\n"); - dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); + dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL); __free_page(gvt->gtt.scratch_page); return ret; } @@ -2779,7 +2778,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt) dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << I915_GTT_PAGE_SHIFT); - dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); + dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL); __free_page(gvt->gtt.scratch_page); @@ -2897,7 +2896,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt) offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_low; idx++) { pte = mm->ggtt_mm.host_ggtt_aperture[idx]; - if (pte & _PAGE_PRESENT) + if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } @@ -2905,7 +2904,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt) offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_hi; idx++) { pte = mm->ggtt_mm.host_ggtt_hidden[idx]; - if (pte & _PAGE_PRESENT) + if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } } diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 7efa386449d1..20b82fb036f8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -328,7 +328,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, return ret; /* Setup DMA mapping. */ - *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL); + *dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, *dma_addr)) { gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n", page_to_pfn(page), ret); @@ -344,7 +344,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, { struct device *dev = vgpu->gvt->gt->i915->drm.dev; - dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + dma_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL); gvt_unpin_guest_page(vgpu, gfn, size); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index b56a8e37a3cd..6c804102528b 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -576,7 +576,7 @@ retry: /* No one is going to touch shadow bb from now on. 
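The scheduler.c hunk continuing below swaps i915_gem_object_unlock() for i915_gem_ww_ctx_fini(): once objects are locked through a ww transaction, ending the transaction releases every lock taken through it, not just one object's. For reference, the enclosing pattern such code sits in (a generic sketch, not gvt code):

    struct i915_gem_ww_ctx ww;
    int err;

    i915_gem_ww_ctx_init(&ww, false);
retry:
    err = i915_gem_object_lock(obj, &ww);
    if (!err) {
        /* ... populate / flush the object ... */
    }
    if (err == -EDEADLK) {
        err = i915_gem_ww_ctx_backoff(&ww);
        if (!err)
            goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);   /* drops all ww-acquired locks */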
*/ i915_gem_object_flush_map(bb->obj); - i915_gem_object_unlock(bb->obj); + i915_gem_ww_ctx_fini(&ww); } } return 0; @@ -630,7 +630,7 @@ retry: return ret; } - i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); + i915_gem_ww_ctx_fini(&ww); /* FIXME: we are not tracking our pinned VMA leaving it * up to the core to fix up the stray pin_count upon @@ -1386,7 +1386,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) enum intel_engine_id i; int ret; - ppgtt = i915_ppgtt_create(&i915->gt); + ppgtt = i915_ppgtt_create(&i915->gt, I915_BO_ALLOC_PM_EARLY); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index c149f348a972..b02a78ac87db 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -15,8 +15,6 @@ #include <linux/rcupdate.h> #include <linux/workqueue.h> -#include "i915_utils.h" - struct i915_active_fence { struct dma_fence __rcu *fence; struct dma_fence_cb cb; diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c index 7b274c51cac0..6e2ad68f8f3f 100644 --- a/drivers/gpu/drm/i915/i915_buddy.c +++ b/drivers/gpu/drm/i915/i915_buddy.c @@ -4,6 +4,7 @@ */ #include <linux/kmemleak.h> +#include <linux/sizes.h> #include "i915_buddy.h" @@ -82,6 +83,7 @@ int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size) size = round_down(size, chunk_size); mm->size = size; + mm->avail = size; mm->chunk_size = chunk_size; mm->max_order = ilog2(size) - ilog2(chunk_size); @@ -155,6 +157,8 @@ void i915_buddy_fini(struct i915_buddy_mm *mm) i915_block_free(mm, mm->roots[i]); } + GEM_WARN_ON(mm->avail != mm->size); + kfree(mm->roots); kfree(mm->free_list); } @@ -230,6 +234,7 @@ void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block) { GEM_BUG_ON(!i915_buddy_block_is_allocated(block)); + mm->avail += i915_buddy_block_size(mm, block); __i915_buddy_free(mm, block); } @@ -283,6 +288,7 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order) } mark_allocated(block); + mm->avail -= i915_buddy_block_size(mm, block); kmemleak_update_trace(block); return block; @@ -368,6 +374,7 @@ int i915_buddy_alloc_range(struct i915_buddy_mm *mm, } mark_allocated(block); + mm->avail -= i915_buddy_block_size(mm, block); list_add_tail(&block->link, &allocated); continue; } @@ -402,6 +409,44 @@ err_free: return err; } +void i915_buddy_block_print(struct i915_buddy_mm *mm, + struct i915_buddy_block *block, + struct drm_printer *p) +{ + u64 start = i915_buddy_block_offset(block); + u64 size = i915_buddy_block_size(mm, block); + + drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size); +} + +void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p) +{ + int order; + + drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n", + mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20); + + for (order = mm->max_order; order >= 0; order--) { + struct i915_buddy_block *block; + u64 count = 0, free; + + list_for_each_entry(block, &mm->free_list[order], link) { + GEM_BUG_ON(!i915_buddy_block_is_free(block)); + count++; + } + + drm_printf(p, "order-%d ", order); + + free = count * (mm->chunk_size << order); + if (free < SZ_1M) + drm_printf(p, "free: %lluKiB", free >> 10); + else + drm_printf(p, "free: %lluMiB", free >> 20); + + drm_printf(p, ", pages: %llu\n", count); + } +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/i915_buddy.c" #endif diff --git 
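With the mm->avail counter introduced in the i915_buddy.c hunk above, every successful allocation subtracts the block size and every free adds it back, so the counter doubles as a leak detector (the GEM_WARN_ON in i915_buddy_fini() trips if anything is still outstanding) and feeds the free-space line of i915_buddy_print(). An illustrative round-trip, assuming an initialized mm and a valid order:

    /* Sketch: the counter must round-trip across alloc/free. */
    u64 before = mm->avail;
    struct i915_buddy_block *block = i915_buddy_alloc(mm, order);

    if (!IS_ERR(block)) {
        GEM_BUG_ON(mm->avail != before - i915_buddy_block_size(mm, block));
        i915_buddy_free(mm, block);
        GEM_BUG_ON(mm->avail != before);
    }

The i915_buddy.h hunk that follows declares the matching print helpers.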
a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h index 3940d632f208..7077742112ac 100644 --- a/drivers/gpu/drm/i915/i915_buddy.h +++ b/drivers/gpu/drm/i915/i915_buddy.h @@ -10,6 +10,8 @@ #include <linux/list.h> #include <linux/slab.h> +#include <drm/drm_print.h> + struct i915_buddy_block { #define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) #define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) @@ -69,6 +71,7 @@ struct i915_buddy_mm { /* Must be at least PAGE_SIZE */ u64 chunk_size; u64 size; + u64 avail; }; static inline u64 @@ -129,6 +132,11 @@ void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block); void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects); +void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p); +void i915_buddy_block_print(struct i915_buddy_mm *mm, + struct i915_buddy_block *block, + struct drm_printer *p); + void i915_buddy_module_exit(void); int i915_buddy_module_init(void); diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c index b79b5f6d2cfa..afb828dab53b 100644 --- a/drivers/gpu/drm/i915/i915_config.c +++ b/drivers/gpu/drm/i915/i915_config.c @@ -8,7 +8,7 @@ unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context) { - if (context && IS_ACTIVE(CONFIG_DRM_I915_FENCE_TIMEOUT)) + if (CONFIG_DRM_I915_FENCE_TIMEOUT && context) return msecs_to_jiffies_timeout(CONFIG_DRM_I915_FENCE_TIMEOUT); return 0; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 44969f5dde50..bafb902269de 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,13 +32,15 @@ #include <drm/drm_debugfs.h> #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_gt_clock_utils.h" -#include "gt/intel_gt.h" +#include "gt/intel_gt_debugfs.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_pm_debugfs.h" #include "gt/intel_gt_requests.h" -#include "gt/intel_reset.h" #include "gt/intel_rc6.h" +#include "gt/intel_reset.h" #include "gt/intel_rps.h" #include "gt/intel_sseu_debugfs.h" @@ -46,9 +48,7 @@ #include "i915_debugfs_params.h" #include "i915_irq.h" #include "i915_scheduler.h" -#include "i915_trace.h" #include "intel_pm.h" -#include "intel_sideband.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { @@ -64,6 +64,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_device_info_print_static(INTEL_INFO(i915), &p); intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + i915_print_iommu_status(i915, &p); intel_gt_info_print(&i915->gt.info, &p); intel_driver_caps_print(&i915->caps, &p); @@ -139,7 +140,6 @@ void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct intel_engine_cs *engine; struct i915_vma *vma; int pin_count = 0; @@ -229,15 +229,12 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (stolen: %08llx)", obj->stolen->start); if (i915_gem_object_is_framebuffer(obj)) seq_printf(m, " (fb)"); - - engine = i915_gem_object_last_write_engine(obj); - if (engine) - seq_printf(m, " (%s)", engine->name); } static int i915_gem_object_info(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); + struct drm_printer p = drm_seq_file_printer(m); struct intel_memory_region *mr; enum 
intel_region_id id; @@ -246,8 +243,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) atomic_read(&i915->mm.free_count), i915->mm.shrink_memory); for_each_memory_region(mr, i915, id) - seq_printf(m, "%s: total:%pa, available:%pa bytes\n", - mr->name, &mr->total, &mr->avail); + intel_memory_region_debug(mr, &p); return 0; } @@ -354,232 +350,12 @@ static const struct file_operations i915_error_state_fops = { static int i915_frequency_info(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_uncore *uncore = &dev_priv->uncore; - struct intel_rps *rps = &dev_priv->gt.rps; - intel_wakeref_t wakeref; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - if (GRAPHICS_VER(dev_priv) == 5) { - u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); - u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK); - - seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); - seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); - seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> - MEMSTAT_VID_SHIFT); - seq_printf(m, "Current P-state: %d\n", - (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - u32 rpmodectl, freq_sts; - - rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); - seq_printf(m, "Video Turbo Mode: %s\n", - yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); - seq_printf(m, "HW control enabled: %s\n", - yesno(rpmodectl & GEN6_RP_ENABLE)); - seq_printf(m, "SW control enabled: %s\n", - yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == - GEN6_RP_MEDIA_SW_MODE)); - - vlv_punit_get(dev_priv); - freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - vlv_punit_put(dev_priv); - - seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); - seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); - - seq_printf(m, "actual GPU freq: %d MHz\n", - intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); - - seq_printf(m, "current GPU freq: %d MHz\n", - intel_gpu_freq(rps, rps->cur_freq)); - - seq_printf(m, "max GPU freq: %d MHz\n", - intel_gpu_freq(rps, rps->max_freq)); - - seq_printf(m, "min GPU freq: %d MHz\n", - intel_gpu_freq(rps, rps->min_freq)); - - seq_printf(m, "idle GPU freq: %d MHz\n", - intel_gpu_freq(rps, rps->idle_freq)); - - seq_printf(m, - "efficient (RPe) frequency: %d MHz\n", - intel_gpu_freq(rps, rps->efficient_freq)); - } else if (GRAPHICS_VER(dev_priv) >= 6) { - u32 rp_state_limits; - u32 gt_perf_status; - u32 rp_state_cap; - u32 rpmodectl, rpinclimit, rpdeclimit; - u32 rpstat, cagf, reqf; - u32 rpupei, rpcurup, rpprevup; - u32 rpdownei, rpcurdown, rpprevdown; - u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; - int max_freq; - - rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS); - if (IS_GEN9_LP(dev_priv)) { - rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP); - gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS); - } else { - rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP); - gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS); - } - - /* RPSTAT1 is in the GT power well */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ); - if (GRAPHICS_VER(dev_priv) >= 9) - reqf >>= 23; - else { - reqf &= ~GEN6_TURBO_DISABLE; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - reqf >>= 24; - else - reqf >>= 25; - } - reqf = 
intel_gpu_freq(rps, reqf); - - rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); - rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD); - rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD); - - rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1); - rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; - rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; - rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; - rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; - rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; - rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; - cagf = intel_rps_read_actual_frequency(rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - - if (GRAPHICS_VER(dev_priv) >= 11) { - pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); - pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); - /* - * The equivalent to the PM ISR & IIR cannot be read - * without affecting the current state of the system - */ - pm_isr = 0; - pm_iir = 0; - } else if (GRAPHICS_VER(dev_priv) >= 8) { - pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2)); - pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2)); - pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2)); - pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2)); - } else { - pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER); - pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR); - pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR); - pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR); - } - pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK); - - seq_printf(m, "Video Turbo Mode: %s\n", - yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); - seq_printf(m, "HW control enabled: %s\n", - yesno(rpmodectl & GEN6_RP_ENABLE)); - seq_printf(m, "SW control enabled: %s\n", - yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == - GEN6_RP_MEDIA_SW_MODE)); - - seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", - pm_ier, pm_imr, pm_mask); - if (GRAPHICS_VER(dev_priv) <= 10) - seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n", - pm_isr, pm_iir); - seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", - rps->pm_intrmsk_mbz); - seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); - seq_printf(m, "Render p-state ratio: %d\n", - (gt_perf_status & (GRAPHICS_VER(dev_priv) >= 9 ? 
0x1ff00 : 0xff00)) >> 8); - seq_printf(m, "Render p-state VID: %d\n", - gt_perf_status & 0xff); - seq_printf(m, "Render p-state limit: %d\n", - rp_state_limits & 0xff); - seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); - seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); - seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); - seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); - seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); - seq_printf(m, "CAGF: %dMHz\n", cagf); - seq_printf(m, "RP CUR UP EI: %d (%lldns)\n", - rpupei, - intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei)); - seq_printf(m, "RP CUR UP: %d (%lldun)\n", - rpcurup, - intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup)); - seq_printf(m, "RP PREV UP: %d (%lldns)\n", - rpprevup, - intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup)); - seq_printf(m, "Up threshold: %d%%\n", - rps->power.up_threshold); - - seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n", - rpdownei, - intel_gt_pm_interval_to_ns(&dev_priv->gt, - rpdownei)); - seq_printf(m, "RP CUR DOWN: %d (%lldns)\n", - rpcurdown, - intel_gt_pm_interval_to_ns(&dev_priv->gt, - rpcurdown)); - seq_printf(m, "RP PREV DOWN: %d (%lldns)\n", - rpprevdown, - intel_gt_pm_interval_to_ns(&dev_priv->gt, - rpprevdown)); - seq_printf(m, "Down threshold: %d%%\n", - rps->power.down_threshold); - - max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : - rp_state_cap >> 16) & 0xff; - max_freq *= (IS_GEN9_BC(dev_priv) || - GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1); - seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", - intel_gpu_freq(rps, max_freq)); - - max_freq = (rp_state_cap & 0xff00) >> 8; - max_freq *= (IS_GEN9_BC(dev_priv) || - GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1); - seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", - intel_gpu_freq(rps, max_freq)); - - max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : - rp_state_cap >> 0) & 0xff; - max_freq *= (IS_GEN9_BC(dev_priv) || - GRAPHICS_VER(dev_priv) >= 11 ? 
GEN9_FREQ_SCALER : 1); - seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", - intel_gpu_freq(rps, max_freq)); - seq_printf(m, "Max overclocked frequency: %dMHz\n", - intel_gpu_freq(rps, rps->max_freq)); - - seq_printf(m, "Current freq: %d MHz\n", - intel_gpu_freq(rps, rps->cur_freq)); - seq_printf(m, "Actual freq: %d MHz\n", cagf); - seq_printf(m, "Idle freq: %d MHz\n", - intel_gpu_freq(rps, rps->idle_freq)); - seq_printf(m, "Min freq: %d MHz\n", - intel_gpu_freq(rps, rps->min_freq)); - seq_printf(m, "Boost freq: %d MHz\n", - intel_gpu_freq(rps, rps->boost_freq)); - seq_printf(m, "Max freq: %d MHz\n", - intel_gpu_freq(rps, rps->max_freq)); - seq_printf(m, - "efficient (RPe) frequency: %d MHz\n", - intel_gpu_freq(rps, rps->efficient_freq)); - } else { - seq_puts(m, "no P-state info available\n"); - } + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_gt *gt = &i915->gt; + struct drm_printer p = drm_seq_file_printer(m); - seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk); - seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); - seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); + intel_gt_pm_frequency_dump(gt, &p); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -778,36 +554,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused) return 0; } -static int -i915_wedged_get(void *data, u64 *val) +static int i915_wedged_get(void *data, u64 *val) { struct drm_i915_private *i915 = data; - int ret = intel_gt_terminally_wedged(&i915->gt); - switch (ret) { - case -EIO: - *val = 1; - return 0; - case 0: - *val = 0; - return 0; - default: - return ret; - } + return intel_gt_debugfs_reset_show(&i915->gt, val); } -static int -i915_wedged_set(void *data, u64 val) +static int i915_wedged_set(void *data, u64 val) { struct drm_i915_private *i915 = data; - /* Flush any previous reset before applying for a new one */ - wait_event(i915->gt.reset.queue, - !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags)); - - intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE, - "Manually set wedged engine mask = %llx", val); - return 0; + return intel_gt_debugfs_reset_store(&i915->gt, val); } DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, @@ -952,27 +710,15 @@ static int i915_sseu_status(struct seq_file *m, void *unused) static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; - struct intel_gt *gt = &i915->gt; - atomic_inc(>->user_wakeref); - intel_gt_pm_get(gt); - if (GRAPHICS_VER(i915) >= 6) - intel_uncore_forcewake_user_get(gt->uncore); - - return 0; + return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt); } static int i915_forcewake_release(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; - struct intel_gt *gt = &i915->gt; - if (GRAPHICS_VER(i915) >= 6) - intel_uncore_forcewake_user_put(&i915->uncore); - intel_gt_pm_put(gt); - atomic_dec(>->user_wakeref); - - return 0; + return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt); } static const struct file_operations i915_forcewake_fops = { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_driver.c index 59fb4c710c8c..e9125f14b3d1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -29,8 +29,8 @@ #include <linux/acpi.h> #include <linux/device.h> -#include <linux/oom.h> #include <linux/module.h> +#include <linux/oom.h> #include <linux/pci.h> 
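The forcewake open/release bodies deleted just above move behind intel_gt_pm_debugfs_forcewake_user_open()/release(); whatever the helpers do internally, the acquire side paired three operations and the release side must mirror them. The removed open path, restated cleanly as a sketch:

    /* Restatement of the removed acquire path; release mirrors it. */
    static int forcewake_user_open(struct intel_gt *gt)
    {
        atomic_inc(&gt->user_wakeref);       /* debugfs-visible count */
        intel_gt_pm_get(gt);                 /* hold the GT awake */
        if (GRAPHICS_VER(gt->i915) >= 6)
            intel_uncore_forcewake_user_get(gt->uncore);
        return 0;
    }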
#include <linux/pm.h> #include <linux/pm_runtime.h> @@ -48,12 +48,14 @@ #include "display/intel_acpi.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" -#include "display/intel_dmc.h" #include "display/intel_display_types.h" +#include "display/intel_dmc.h" #include "display/intel_dp.h" +#include "display/intel_dpt.h" #include "display/intel_fbdev.h" #include "display/intel_hotplug.h" #include "display/intel_overlay.h" +#include "display/intel_pch_refclk.h" #include "display/intel_pipe_crc.h" #include "display/intel_pps.h" #include "display/intel_sprite.h" @@ -67,7 +69,10 @@ #include "gt/intel_gt_pm.h" #include "gt/intel_rc6.h" +#include "pxp/intel_pxp_pm.h" + #include "i915_debugfs.h" +#include "i915_driver.h" #include "i915_drv.h" #include "i915_ioc32.h" #include "i915_irq.h" @@ -77,17 +82,16 @@ #include "i915_suspend.h" #include "i915_switcheroo.h" #include "i915_sysfs.h" -#include "i915_trace.h" #include "i915_vgpu.h" #include "intel_dram.h" #include "intel_gvt.h" #include "intel_memory_region.h" +#include "intel_pcode.h" #include "intel_pm.h" #include "intel_region_ttm.h" -#include "intel_sideband.h" #include "vlv_suspend.h" -static const struct drm_driver driver; +static const struct drm_driver i915_drm_driver; static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { @@ -97,7 +101,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0)); if (!dev_priv->bridge_dev) { drm_err(&dev_priv->drm, "bridge device not found\n"); - return -1; + return -EIO; } return 0; } @@ -320,7 +324,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->sb_lock); cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); - mutex_init(&dev_priv->av_mutex); + mutex_init(&dev_priv->audio.mutex); mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); mutex_init(&dev_priv->hdcp_comp_mutex); @@ -409,13 +413,18 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (i915_inject_probe_failure(dev_priv)) return -ENODEV; - if (i915_get_bridge_dev(dev_priv)) - return -EIO; + ret = i915_get_bridge_dev(dev_priv); + if (ret < 0) + return ret; - ret = intel_uncore_init_mmio(&dev_priv->uncore); + ret = intel_uncore_setup_mmio(&dev_priv->uncore); if (ret < 0) goto err_bridge; + ret = intel_uncore_init_mmio(&dev_priv->uncore); + if (ret) + goto err_mmio; + /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); intel_device_info_runtime_init(dev_priv); @@ -432,6 +441,8 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) err_uncore: intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); +err_mmio: + intel_uncore_cleanup_mmio(&dev_priv->uncore); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -446,6 +457,7 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); + intel_uncore_cleanup_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); } @@ -588,8 +600,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) pci_set_master(pdev); - intel_gt_init_workarounds(dev_priv); - /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there * according to the published specs. 
It doesn't appear to function @@ -730,6 +740,12 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_gem_driver_unregister(dev_priv); } +void +i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p) +{ + drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915))); +} + static void i915_welcome_messages(struct drm_i915_private *dev_priv) { if (drm_debug_enabled(DRM_UT_DRIVER)) { @@ -745,6 +761,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print_static(INTEL_INFO(dev_priv), &p); intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); + i915_print_iommu_status(dev_priv, &p); intel_gt_info_print(&dev_priv->gt.info, &p); } @@ -765,7 +782,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *device_info; struct drm_i915_private *i915; - i915 = devm_drm_dev_alloc(&pdev->dev, &driver, + i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, struct drm_i915_private, drm); if (IS_ERR(i915)) return i915; @@ -806,7 +823,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return PTR_ERR(i915); /* Disable nuclear pageflip by default on pre-ILK */ - if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5) + if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5) i915->drm.driver_features &= ~DRIVER_ATOMIC; /* @@ -1096,9 +1113,7 @@ static int i915_drm_prepare(struct drm_device *dev) * split out that work and pull it forward so that after point, * the GPU is not woken again. */ - i915_gem_suspend(i915); - - return 0; + return i915_gem_backup_suspend(i915); } static int i915_drm_suspend(struct drm_device *dev) @@ -1128,6 +1143,8 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_hw(dev_priv); + /* Must be called before GGTT is suspended. */ + intel_dpt_suspend(dev_priv); i915_ggtt_suspend(&dev_priv->ggtt); i915_save_display(dev_priv); @@ -1184,6 +1201,14 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) goto out; } + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM and on s2idle cases. + */ + if (suspend_to_idle(dev_priv)) + pci_d3cold_disable(pdev); + pci_disable_device(pdev); /* * During hibernation on some platforms the BIOS may try to access @@ -1208,7 +1233,8 @@ out: return ret; } -int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) +int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, + pm_message_t state) { int error; @@ -1244,6 +1270,8 @@ static int i915_drm_resume(struct drm_device *dev) drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); i915_ggtt_resume(&dev_priv->ggtt); + /* Must be called after GGTT is resumed. 
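The two ordering comments in this hunk pair up: the DPT mappings depend on GGTT state (per the comments themselves), so on the way down the DPT must be quiesced before the GGTT, and on the way up only after the GGTT is back. Condensed from the suspend and resume hunks:

    /* Suspend: DPT first, then GGTT. */
    intel_dpt_suspend(dev_priv);
    i915_ggtt_suspend(&dev_priv->ggtt);

    /* Resume: GGTT first, then DPT. */
    i915_ggtt_resume(&dev_priv->ggtt);
    intel_dpt_resume(dev_priv);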
*/ + intel_dpt_resume(dev_priv); intel_dmc_ucode_resume(dev_priv); @@ -1345,6 +1373,8 @@ static int i915_drm_resume_early(struct drm_device *dev) pci_set_master(pdev); + pci_d3cold_enable(pdev); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); ret = vlv_resume_prepare(dev_priv, false); @@ -1365,7 +1395,7 @@ static int i915_drm_resume_early(struct drm_device *dev) return ret; } -int i915_resume_switcheroo(struct drm_i915_private *i915) +int i915_driver_resume_switcheroo(struct drm_i915_private *i915) { int ret; @@ -1521,6 +1551,7 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1566,6 +1597,12 @@ static int intel_runtime_suspend(struct device *kdev) drm_err(&dev_priv->drm, "Unclaimed access detected prior to suspending\n"); + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM and on s2idle cases. + */ + pci_d3cold_disable(pdev); rpm->suspended = true; /* @@ -1604,6 +1641,7 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1616,6 +1654,7 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; + pci_d3cold_enable(pdev); if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) drm_dbg(&dev_priv->drm, "Unclaimed access during suspend, bios?\n"); @@ -1778,7 +1817,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), }; -static const struct drm_driver driver = { +static const struct drm_driver i915_drm_driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. 
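Both the s2idle path earlier and the runtime-PM path above apply the same temporary workaround on discrete GPUs: D3cold is disabled before powering down and re-enabled on the way back up, keeping the PCI device out of D3cold until proper PCI state handling lands (the FIXMEs mark it as a stopgap). The pairing, reduced to a sketch:

    static int runtime_suspend_sketch(struct drm_i915_private *i915)
    {
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

        pci_d3cold_disable(pdev);   /* keep the device out of D3cold */
        /* ... power down ... */
        return 0;
    }

    static int runtime_resume_sketch(struct drm_i915_private *i915)
    {
        struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

        /* ... power up ... */
        pci_d3cold_enable(pdev);    /* allow D3cold again */
        return 0;
    }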
*/ diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h new file mode 100644 index 000000000000..9ef8db4aa0a6 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_driver.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_DRIVER_H__ +#define __I915_DRIVER_H__ + +#include <linux/pm.h> + +struct pci_dev; +struct pci_device_id; +struct drm_i915_private; + +extern const struct dev_pm_ops i915_pm_ops; + +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +void i915_driver_remove(struct drm_i915_private *i915); +void i915_driver_shutdown(struct drm_i915_private *i915); + +int i915_driver_resume_switcheroo(struct drm_i915_private *i915); +int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); + +#endif /* __I915_DRIVER_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 005b1cec7007..58aa55bc7461 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -50,7 +50,6 @@ #include <linux/stackdepot.h> #include <linux/xarray.h> -#include <drm/intel-gtt.h> #include <drm/drm_gem.h> #include <drm/drm_auth.h> #include <drm/drm_cache.h> @@ -90,6 +89,7 @@ #include "intel_device_info.h" #include "intel_memory_region.h" #include "intel_pch.h" +#include "intel_pm_types.h" #include "intel_runtime_pm.h" #include "intel_step.h" #include "intel_uncore.h" @@ -117,30 +117,6 @@ struct drm_i915_gem_object; -enum hpd_pin { - HPD_NONE = 0, - HPD_TV = HPD_NONE, /* TV is known to be unreliable */ - HPD_CRT, - HPD_SDVO_B, - HPD_SDVO_C, - HPD_PORT_A, - HPD_PORT_B, - HPD_PORT_C, - HPD_PORT_D, - HPD_PORT_E, - HPD_PORT_TC1, - HPD_PORT_TC2, - HPD_PORT_TC3, - HPD_PORT_TC4, - HPD_PORT_TC5, - HPD_PORT_TC6, - - HPD_NUM_PINS -}; - -#define for_each_hpd_pin(__pin) \ - for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) - /* Threshold == 5 for long IRQs, 50 for short */ #define HPD_STORM_DEFAULT_THRESHOLD 50 @@ -191,8 +167,6 @@ struct i915_hotplug { I915_GEM_DOMAIN_VERTEX) struct drm_i915_private; -struct i915_mm_struct; -struct i915_mmu_object; struct drm_i915_file_private { struct drm_i915_private *dev_priv; @@ -323,15 +297,15 @@ struct intel_crtc; struct intel_limit; struct dpll; -struct drm_i915_display_funcs { - void (*get_cdclk)(struct drm_i915_private *dev_priv, - struct intel_cdclk_config *cdclk_config); - void (*set_cdclk)(struct drm_i915_private *dev_priv, - const struct intel_cdclk_config *cdclk_config, - enum pipe pipe); - int (*bw_calc_min_cdclk)(struct intel_atomic_state *state); - int (*get_fifo_size)(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane); +/* functions used internal in intel_pm.c */ +struct drm_i915_clock_gating_funcs { + void (*init_clock_gating)(struct drm_i915_private *dev_priv); +}; + +/* functions used for watermark calcs for display. 
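The remainder of this i915_drv.h hunk keeps breaking the monolithic drm_i915_display_funcs into small per-domain vtables (watermarks, color management, cdclk, hotplug, FDI, PLL), so each caller dispatches through a narrow table instead of one catch-all struct. A hypothetical call site under that split (the field names here are assumptions, not taken from this hunk):

    /* Hypothetical dispatch through the split tables. */
    static void update_wm(struct drm_i915_private *i915)
    {
        if (i915->wm_disp->update_wm)
            i915->wm_disp->update_wm(i915);
    }

    static void hpd_irq_setup(struct drm_i915_private *i915)
    {
        if (i915->hotplug_funcs)
            i915->hotplug_funcs->hpd_irq_setup(i915);
    }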
*/ +struct drm_i915_wm_disp_funcs { + /* update_wm is for legacy wm management */ + void (*update_wm)(struct drm_i915_private *dev_priv); int (*compute_pipe_wm)(struct intel_atomic_state *state, struct intel_crtc *crtc); int (*compute_intermediate_wm)(struct intel_atomic_state *state, @@ -343,39 +317,9 @@ struct drm_i915_display_funcs { void (*optimize_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); int (*compute_global_watermarks)(struct intel_atomic_state *state); - void (*update_wm)(struct intel_crtc *crtc); - int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); - u8 (*calc_voltage_level)(int cdclk); - /* Returns the active state of the crtc, and if the crtc is active, - * fills out the pipe-config with the hw state. */ - bool (*get_pipe_config)(struct intel_crtc *, - struct intel_crtc_state *); - void (*get_initial_plane_config)(struct intel_crtc *, - struct intel_initial_plane_config *); - int (*crtc_compute_clock)(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state); - void (*crtc_enable)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*crtc_disable)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*commit_modeset_enables)(struct intel_atomic_state *state); - void (*commit_modeset_disables)(struct intel_atomic_state *state); - void (*audio_codec_enable)(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); - void (*audio_codec_disable)(struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state); - void (*fdi_link_train)(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state); - void (*init_clock_gating)(struct drm_i915_private *dev_priv); - void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); - /* clock updates for mode set */ - /* cursor updates */ - /* render clock increase/decrease */ - /* display clock increase/decrease */ - /* pll clock increase/decrease */ +}; +struct intel_color_funcs { int (*color_check)(struct intel_crtc_state *crtc_state); /* * Program double buffered color management registers during @@ -394,102 +338,46 @@ struct drm_i915_display_funcs { void (*read_luts)(struct intel_crtc_state *crtc_state); }; +struct intel_cdclk_funcs { + void (*get_cdclk)(struct drm_i915_private *dev_priv, + struct intel_cdclk_config *cdclk_config); + void (*set_cdclk)(struct drm_i915_private *dev_priv, + const struct intel_cdclk_config *cdclk_config, + enum pipe pipe); + int (*bw_calc_min_cdclk)(struct intel_atomic_state *state); + int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); + u8 (*calc_voltage_level)(int cdclk); +}; -#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ - -struct intel_fbc { - /* This is always the inner lock when overlapping with struct_mutex and - * it's the outer lock when overlapping with stolen_lock. */ - struct mutex lock; - unsigned int possible_framebuffer_bits; - unsigned int busy_bits; - struct intel_crtc *crtc; - - struct drm_mm_node compressed_fb; - struct drm_mm_node compressed_llb; - - u8 limit; - - bool false_color; - - bool active; - bool activated; - bool flip_pending; - - bool underrun_detected; - struct work_struct underrun_work; - - /* - * Due to the atomic rules we can't access some structures without the - * appropriate locking, so we cache information here in order to avoid - * these problems. 
- */ - struct intel_fbc_state_cache { - struct { - unsigned int mode_flags; - u32 hsw_bdw_pixel_rate; - } crtc; - - struct { - unsigned int rotation; - int src_w; - int src_h; - bool visible; - /* - * Display surface base address adjustement for - * pageflips. Note that on gen4+ this only adjusts up - * to a tile, offsets within a tile are handled in - * the hw itself (with the TILEOFF register). - */ - int adjusted_x; - int adjusted_y; - - u16 pixel_blend_mode; - } plane; +struct intel_hotplug_funcs { + void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); +}; - struct { - const struct drm_format_info *format; - unsigned int stride; - u64 modifier; - } fb; - - unsigned int fence_y_offset; - u16 gen9_wa_cfb_stride; - u16 interval; - s8 fence_id; - bool psr2_active; - } state_cache; +struct intel_fdi_funcs { + void (*fdi_link_train)(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state); +}; - /* - * This structure contains everything that's relevant to program the - * hardware registers. When we want to figure out if we need to disable - * and re-enable FBC for a new configuration we just check if there's - * something different in the struct. The genx_fbc_activate functions - * are supposed to read from it in order to program the registers. - */ - struct intel_fbc_reg_params { - struct { - enum pipe pipe; - enum i9xx_plane_id i9xx_plane; - } crtc; +struct intel_dpll_funcs { + int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state); +}; - struct { - const struct drm_format_info *format; - unsigned int stride; - u64 modifier; - } fb; - - int cfb_size; - unsigned int fence_y_offset; - u16 gen9_wa_cfb_stride; - u16 interval; - s8 fence_id; - bool plane_visible; - } params; - - const char *no_fbc_reason; +struct drm_i915_display_funcs { + /* Returns the active state of the crtc, and if the crtc is active, + * fills out the pipe-config with the hw state. */ + bool (*get_pipe_config)(struct intel_crtc *, + struct intel_crtc_state *); + void (*get_initial_plane_config)(struct intel_crtc *, + struct intel_initial_plane_config *); + void (*crtc_enable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*crtc_disable)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*commit_modeset_enables)(struct intel_atomic_state *state); }; +#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ + /* * HIGH_RR is the highest eDP panel refresh rate read from EDID * LOW_RR is the lowest eDP panel refresh rate found from EDID @@ -526,7 +414,6 @@ struct i915_drrs { #define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8) struct intel_fbdev; -struct intel_fbc_work; struct intel_gmbus { struct i2c_adapter adapter; @@ -636,22 +523,6 @@ i915_fence_timeout(const struct drm_i915_private *i915) /* Amount of PSF GV points, BSpec precisely defines this */ #define I915_NUM_PSF_GV_POINTS 3 -struct ddi_vbt_port_info { - /* Non-NULL if port present. */ - struct intel_bios_encoder_data *devdata; - - int max_tmds_clock; - - /* This is an index in the HDMI/DVI DDI buffer translation table. 
*/ - u8 hdmi_level_shift; - u8 hdmi_level_shift_set:1; - - u8 alternate_aux_channel; - u8 alternate_ddc_pin; - - int dp_max_link_rate; /* 0 for not limited by VBT */ -}; - enum psr_lines_to_wait { PSR_0_LINES_TO_WAIT = 0, PSR_1_LINE_TO_WAIT, @@ -706,6 +577,7 @@ struct intel_vbt_data { struct { u16 pwm_freq_hz; + u16 brightness_precision_bits; bool present; bool active_low_pwm; u8 min_brightness; /* min_brightness/255 of max */ @@ -732,73 +604,10 @@ struct intel_vbt_data { struct list_head display_devices; - struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; + struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */ struct sdvo_device_mapping sdvo_mappings[2]; }; -enum intel_ddb_partitioning { - INTEL_DDB_PART_1_2, - INTEL_DDB_PART_5_6, /* IVB+ */ -}; - -struct ilk_wm_values { - u32 wm_pipe[3]; - u32 wm_lp[3]; - u32 wm_lp_spr[3]; - bool enable_fbc_wm; - enum intel_ddb_partitioning partitioning; -}; - -struct g4x_pipe_wm { - u16 plane[I915_MAX_PLANES]; - u16 fbc; -}; - -struct g4x_sr_wm { - u16 plane; - u16 cursor; - u16 fbc; -}; - -struct vlv_wm_ddl_values { - u8 plane[I915_MAX_PLANES]; -}; - -struct vlv_wm_values { - struct g4x_pipe_wm pipe[3]; - struct g4x_sr_wm sr; - struct vlv_wm_ddl_values ddl[3]; - u8 level; - bool cxsr; -}; - -struct g4x_wm_values { - struct g4x_pipe_wm pipe[2]; - struct g4x_sr_wm sr; - struct g4x_sr_wm hpll; - bool cxsr; - bool hpll_en; - bool fbc_en; -}; - -struct skl_ddb_entry { - u16 start, end; /* in number of blocks, 'end' is exclusive */ -}; - -static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) -{ - return entry->end - entry->start; -} - -static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, - const struct skl_ddb_entry *e2) -{ - if (e1->start == e2->start && e1->end == e2->end) - return true; - - return false; -} - struct i915_frontbuffer_tracking { spinlock_t lock; @@ -826,6 +635,30 @@ struct i915_selftest_stash { struct ida mock_region_instances; }; +/* intel_audio.c private */ +struct intel_audio_funcs; +struct intel_audio_private { + /* Display internal audio functions */ + const struct intel_audio_funcs *funcs; + + /* hda/i915 audio component */ + struct i915_audio_component *component; + bool component_registered; + /* mutex for audio/video sync */ + struct mutex mutex; + int power_refcount; + u32 freq_cntrl; + + /* Used to save the pipe-to-encoder mapping for audio */ + struct intel_encoder *encoder_map[I915_MAX_PIPES]; + + /* necessary resource sharing with HDMI LPE audio driver. 
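The intel_audio_private block above gathers what used to be loose audio fields of drm_i915_private (component pointer, av mutex, power refcount, encoder map) into one named substruct. A tiny sketch of the payoff, with made-up field names:

#include <stdio.h>

struct audio_state {              /* everything audio in one place */
	int power_refcount;
	unsigned int freq_cntrl;
};

struct device_priv {
	/* before: int audio_power_refcount; unsigned audio_freq_cntrl; ... */
	struct audio_state audio; /* after: grouped, greppable, documentable */
};

int main(void)
{
	struct device_priv priv = {
		.audio = { .power_refcount = 1, .freq_cntrl = 0x10 },
	};

	printf("audio refcount = %d\n", priv.audio.power_refcount);
	return 0;
}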
*/ + struct { + struct platform_device *platdev; + int irq; + } lpe; +}; + struct drm_i915_private { struct drm_device drm; @@ -886,8 +719,6 @@ struct drm_i915_private { */ u32 gpio_mmio_base; - u32 hsw_psr_mmio_adjust; - /* MMIO base address for MIPI regs */ u32 mipi_mmio_base; @@ -918,7 +749,7 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; - struct intel_fbc fbc; + struct intel_fbc *fbc; struct i915_drrs drrs; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -974,8 +805,29 @@ struct drm_i915_private { /* unbound hipri wq for page flips/plane updates */ struct workqueue_struct *flip_wq; + /* pm private clock gating functions */ + const struct drm_i915_clock_gating_funcs *clock_gating_funcs; + + /* pm display functions */ + const struct drm_i915_wm_disp_funcs *wm_disp; + + /* irq display functions */ + const struct intel_hotplug_funcs *hotplug_funcs; + + /* fdi display functions */ + const struct intel_fdi_funcs *fdi_funcs; + + /* display pll funcs */ + const struct intel_dpll_funcs *dpll_funcs; + /* Display functions */ - struct drm_i915_display_funcs display; + const struct drm_i915_display_funcs *display; + + /* Display internal color functions */ + const struct intel_color_funcs *color_funcs; + + /* Display CDCLK functions */ + const struct intel_cdclk_funcs *cdclk_funcs; /* PCH chipset type */ enum intel_pch pch_type; @@ -992,9 +844,6 @@ struct drm_i915_private { /* Kernel Modesetting */ - struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; - struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; - /** * dpll and cdclk state is protected by connection_mutex * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. @@ -1022,8 +871,6 @@ struct drm_i915_private { */ u8 active_pipes; - struct i915_wa_list gt_wa_list; - struct i915_frontbuffer_tracking fb_tracking; struct intel_atomic_helper { @@ -1062,17 +909,6 @@ struct drm_i915_private { struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; - /* hda/i915 audio component */ - struct i915_audio_component *audio_component; - bool audio_component_registered; - /** - * av_mutex - mutex for audio/video sync - * - */ - struct mutex av_mutex; - int audio_power_refcount; - u32 audio_freq_cntrl; - u32 fdi_rx_config; /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ @@ -1205,14 +1041,7 @@ struct drm_i915_private { bool ipc_enabled; - /* Used to save the pipe-to-encoder mapping for audio */ - struct intel_encoder *av_enc_map[I915_MAX_PIPES]; - - /* necessary resource sharing with HDMI LPE audio driver. 
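Note also the switch above from an embedded struct intel_fbc to a struct intel_fbc * pointer. Holding a pointer lets the owning module allocate and size the state itself, and NULL becomes a natural "feature absent" marker. A compilable sketch under those assumptions — toy types, not i915's:

#include <stdio.h>
#include <stdlib.h>

struct fbc {
	int enabled;
};

struct device_priv {
	struct fbc *fbc; /* NULL until the FBC code initialises it */
};

static struct fbc *fbc_init(void)
{
	struct fbc *fbc = calloc(1, sizeof(*fbc));

	if (fbc)
		fbc->enabled = 1;
	return fbc;
}

int main(void)
{
	struct device_priv priv = { .fbc = fbc_init() };

	if (priv.fbc) /* callers now check presence instead of a flag */
		printf("fbc enabled: %d\n", priv.fbc->enabled);
	free(priv.fbc);
	return 0;
}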
*/ - struct { - struct platform_device *platdev; - int irq; - } lpe_audio; + struct intel_audio_private audio; struct i915_pmu pmu; @@ -1305,15 +1134,15 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define IP_VER(ver, rel) ((ver) << 8 | (rel)) -#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver) -#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics_ver, \ - INTEL_INFO(i915)->graphics_rel) +#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver) +#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \ + INTEL_INFO(i915)->graphics.rel) #define IS_GRAPHICS_VER(i915, from, until) \ (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) -#define MEDIA_VER(i915) (INTEL_INFO(i915)->media_ver) -#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media_ver, \ - INTEL_INFO(i915)->media_rel) +#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver) +#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.arch, \ + INTEL_INFO(i915)->media.rel) #define IS_MEDIA_VER(i915, from, until) \ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) @@ -1326,15 +1155,20 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step) -#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step) +#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step) +#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step) #define IS_DISPLAY_STEP(__i915, since, until) \ (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \ INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) -#define IS_GT_STEP(__i915, since, until) \ - (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \ - INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) < (until)) +#define IS_GRAPHICS_STEP(__i915, since, until) \ + (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \ + INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until)) + +#define IS_MEDIA_STEP(__i915, since, until) \ + (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \ + INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until)) static __always_inline unsigned int __platform_mask_index(const struct intel_runtime_info *info, @@ -1433,7 +1267,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE) -#define IS_CANNONLAKE(dev_priv) 0 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) #define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \ IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) @@ -1448,6 +1281,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10) #define IS_DG2_G11(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11) +#define IS_ADLS_RPLS(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -1508,15 +1343,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define 
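The IP_VER() scheme above is simple but worth spelling out: major version and release are packed into one integer, major in the high byte, so full versions order correctly under plain integer comparison. A worked check, reusing the macro exactly as defined in the hunk:

#include <assert.h>
#include <stdio.h>

#define IP_VER(ver, rel) ((ver) << 8 | (rel))

int main(void)
{
	/* 12.50 sorts after 12.10 and 12.0, as intended */
	assert(IP_VER(12, 50) > IP_VER(12, 10));
	assert(IP_VER(12, 10) > IP_VER(12, 0));
	printf("IP_VER(12, 50) = 0x%04x\n", IP_VER(12, 50)); /* 0x0c32 */
	return 0;
}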
IS_TGL_Y(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_SKL_GT_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GT_STEP(p, since, until)) +#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until)) -#define IS_KBL_GT_STEP(dev_priv, since, until) \ - (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until)) +#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \ + (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until)) #define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \ (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until)) -#define IS_JSL_EHL_GT_STEP(p, since, until) \ - (IS_JSL_EHL(p) && IS_GT_STEP(p, since, until)) +#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \ + (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until)) #define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \ (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until)) @@ -1524,19 +1359,19 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_TIGERLAKE(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_TGL_UY_GT_STEP(__i915, since, until) \ +#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ ((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_TGL_GT_STEP(__i915, since, until) \ +#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ (IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) #define IS_RKL_DISPLAY_STEP(p, since, until) \ (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) -#define IS_DG1_GT_STEP(p, since, until) \ - (IS_DG1(p) && IS_GT_STEP(p, since, until)) +#define IS_DG1_GRAPHICS_STEP(p, since, until) \ + (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) #define IS_DG1_DISPLAY_STEP(p, since, until) \ (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) @@ -1544,20 +1379,20 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_ALDERLAKE_S(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_ADLS_GT_STEP(__i915, since, until) \ +#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \ (IS_ALDERLAKE_S(__i915) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) #define IS_ADLP_DISPLAY_STEP(__i915, since, until) \ (IS_ALDERLAKE_P(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_ADLP_GT_STEP(__i915, since, until) \ +#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \ (IS_ALDERLAKE_P(__i915) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_XEHPSDV_GT_STEP(__i915, since, until) \ - (IS_XEHPSDV(__i915) && IS_GT_STEP(__i915, since, until)) +#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \ + (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until)) /* * DG2 hardware steppings are a bit unusual. The hardware design was forked @@ -1573,11 +1408,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, * and stepping-specific logic will be applied with a general DG2-wide stepping * number. 
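All of the IS_*_STEP() helpers being renamed above share one convention: the range is half-open, "since" inclusive and "until" exclusive, so consecutive stepping ranges tile without overlap. A minimal restatement of just that check, treating steppings as a monotonically increasing enum as i915 does (0, 1, 2 standing in for A0, A1, B0):

#include <stdio.h>

/* half-open: applies to [since, until) */
static int step_in_range(int step, int since, int until)
{
	return step >= since && step < until;
}

int main(void)
{
	printf("%d\n", step_in_range(0, 0, 2)); /* 1: A0 needs the workaround */
	printf("%d\n", step_in_range(1, 0, 2)); /* 1: A1 too */
	printf("%d\n", step_in_range(2, 0, 2)); /* 0: B0 has the fix */
	return 0;
}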
*/ -#define IS_DG2_GT_STEP(__i915, variant, since, until) \ +#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \ (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_DG2_DISP_STEP(__i915, since, until) \ +#define IS_DG2_DISPLAY_STEP(__i915, since, until) \ (IS_DG2(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) @@ -1665,6 +1500,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst) +#define HAS_DP20(dev_priv) (IS_DG2(dev_priv)) #define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl) #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) @@ -1673,7 +1509,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) #define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12) -#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0) +#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) @@ -1702,6 +1538,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) +#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \ + INTEL_INFO(dev_priv)->has_pxp) && \ + VDBOX_MASK(&dev_priv->gt)) #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) @@ -1715,11 +1554,13 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask)) +#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask)) + +#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) +#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11) -#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12) +#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) /* Only valid when HAS_DISPLAY() is true */ #define INTEL_DISPLAY_ENABLED(dev_priv) \ @@ -1733,26 +1574,27 @@ static inline bool run_as_guest(void) #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ IS_ALDERLAKE_S(dev_priv)) -static inline bool intel_vtd_active(void) +static inline bool intel_vtd_active(struct drm_i915_private *i915) { -#ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_gfx_mapped) + if (device_iommu_mapped(i915->drm.dev)) return true; -#endif /* Running as a guest, we assume the host is enforcing VT'd */ return run_as_guest(); } +void +i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p); + static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) { - return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(); + return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); } static inline bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) { - return IS_BROXTON(i915) && intel_vtd_active(); + return IS_BROXTON(i915) && intel_vtd_active(i915); } static inline bool @@ -1761,16 +1603,7 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) 
return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); } -/* i915_drv.c */ -extern const struct dev_pm_ops i915_pm_ops; - -int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); -void i915_driver_remove(struct drm_i915_private *i915); -void i915_driver_shutdown(struct drm_i915_private *i915); - -int i915_resume_switcheroo(struct drm_i915_private *i915); -int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); - +/* i915_getparam.c */ int i915_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1791,6 +1624,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) */ while (atomic_read(&i915->mm.free_count)) { flush_work(&i915->mm.free_work); + flush_delayed_work(&i915->bdev.wq); rcu_barrier(); } } @@ -1881,11 +1715,11 @@ i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) { struct i915_address_space *vm; - rcu_read_lock(); + xa_lock(&file_priv->vm_xa); vm = xa_load(&file_priv->vm_xa, id); - if (vm && !kref_get_unless_zero(&vm->ref)) - vm = NULL; - rcu_read_unlock(); + if (vm) + kref_get(&vm->ref); + xa_unlock(&file_priv->vm_xa); return vm; } @@ -1905,6 +1739,10 @@ int i915_gem_evict_vm(struct i915_address_space *vm); struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *dev_priv, phys_addr_t size); +struct drm_i915_gem_object * +__i915_gem_object_create_internal(struct drm_i915_private *dev_priv, + const struct drm_i915_gem_object_ops *ops, + phys_addr_t size); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) @@ -1944,14 +1782,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -/* i915_mm.c */ -int remap_io_mapping(struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn, unsigned long size, - struct io_mapping *iomap); -int remap_io_sg(struct vm_area_struct *vma, - unsigned long addr, unsigned long size, - struct scatterlist *sgl, resource_size_t iobase); - static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { if (GRAPHICS_VER(i915) >= 11) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 590efc8b0265..527228d4da7e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -764,7 +764,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * perspective, requiring manual detiling by the client. */ if (!i915_gem_object_has_struct_page(obj) || - cpu_write_needs_clflush(obj)) + i915_gem_cpu_write_needs_clflush(obj)) /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. gtt mappings when moving data between * textures). Fallback to the shmem path in that case. 
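The i915_gem_vm_lookup() rewrite above swaps rcu_read_lock()/kref_get_unless_zero() for xa_lock()/kref_get(): once lookup and removal serialize on the same lock, an object found in the table cannot be mid-teardown, so a plain reference grab is enough. A userspace analogue of that reasoning, with a pthread mutex standing in for the xarray lock and a one-slot "table":

#include <pthread.h>
#include <stdio.h>

struct vm {
	int refcount; /* protected by table_lock in this toy model */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vm *table_slot; /* removal also takes table_lock */

static struct vm *vm_lookup(void)
{
	struct vm *vm;

	pthread_mutex_lock(&table_lock);
	vm = table_slot;
	if (vm)
		vm->refcount++; /* safe: remover can't run concurrently */
	pthread_mutex_unlock(&table_lock);
	return vm;
}

int main(void)
{
	struct vm v = { .refcount = 1 };

	table_slot = &v;
	vm_lookup();
	printf("refcount now %d\n", v.refcount); /* 2 */
	return 0;
}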
@@ -1005,7 +1005,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj->ops->adjust_lru(obj); } - if (i915_gem_object_has_pages(obj)) { + if (i915_gem_object_has_pages(obj) || + i915_gem_object_has_self_managed_shrink_list(obj)) { unsigned long flags; spin_lock_irqsave(&i915->mm.obj_lock, flags); @@ -1139,8 +1140,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) { intel_gt_driver_release(&dev_priv->gt); - intel_wa_list_free(&dev_priv->gt_wa_list); - intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_drain_freed_objects(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 36489be4896b..cd5f2348a187 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -30,7 +30,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, do { if (dma_map_sg_attrs(obj->base.dev->dev, pages->sgl, pages->nents, - PCI_DMA_BIDIRECTIONAL, + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN)) @@ -64,7 +64,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, usleep_range(100, 250); dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents, - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); } /** diff --git a/drivers/gpu/drm/i915/i915_gem_ww.h b/drivers/gpu/drm/i915/i915_gem_ww.h index f6b1a796667b..86f0fe343de6 100644 --- a/drivers/gpu/drm/i915/i915_gem_ww.h +++ b/drivers/gpu/drm/i915/i915_gem_ww.h @@ -11,8 +11,7 @@ struct i915_gem_ww_ctx { struct ww_acquire_ctx ctx; struct list_head obj_list; struct drm_i915_gem_object *contended; - unsigned short intr; - unsigned short loop; + bool intr; }; void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr); @@ -20,31 +19,23 @@ void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx); int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx); void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj); -/* Internal functions used by the inlines! Don't use. */ +/* Internal function used by the inlines! Don't use. 
*/ static inline int __i915_gem_ww_fini(struct i915_gem_ww_ctx *ww, int err) { - ww->loop = 0; if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(ww); if (!err) - ww->loop = 1; + err = -EDEADLK; } - if (!ww->loop) + if (err != -EDEADLK) i915_gem_ww_ctx_fini(ww); return err; } -static inline void -__i915_gem_ww_init(struct i915_gem_ww_ctx *ww, bool intr) -{ - i915_gem_ww_ctx_init(ww, intr); - ww->loop = 1; -} - -#define for_i915_gem_ww(_ww, _err, _intr) \ - for (__i915_gem_ww_init(_ww, _intr); (_ww)->loop; \ - _err = __i915_gem_ww_fini(_ww, _err)) - +#define for_i915_gem_ww(_ww, _err, _intr) \ + for (i915_gem_ww_ctx_init(_ww, _intr), (_err) = -EDEADLK; \ + (_err) == -EDEADLK; \ + (_err) = __i915_gem_ww_fini(_ww, _err)) #endif diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9cf6ac575de1..96d2d99f5b98 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -48,8 +48,9 @@ #include "i915_gpu_error.h" #include "i915_memcpy.h" #include "i915_scatterlist.h" +#include "i915_vma_snapshot.h" -#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) +#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) static void __sg_set_buf(struct scatterlist *sg, @@ -275,16 +276,16 @@ static bool compress_start(struct i915_vma_compress *c) static void *compress_next_page(struct i915_vma_compress *c, struct i915_vma_coredump *dst) { - void *page; + void *page_addr; + struct page *page; - if (dst->page_count >= dst->num_pages) - return ERR_PTR(-ENOSPC); - - page = pool_alloc(&c->pool, ALLOW_FAIL); - if (!page) + page_addr = pool_alloc(&c->pool, ALLOW_FAIL); + if (!page_addr) return ERR_PTR(-ENOMEM); - return dst->pages[dst->page_count++] = page; + page = virt_to_page(page_addr); + list_add_tail(&page->lru, &dst->page_list); + return page_addr; } static int compress_page(struct i915_vma_compress *c, @@ -397,7 +398,7 @@ static int compress_page(struct i915_vma_compress *c, if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); - dst->pages[dst->page_count++] = ptr; + list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list); cond_resched(); return 0; @@ -431,6 +432,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu; int slice; int subslice; + int iter; err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone.instdone); @@ -444,19 +446,38 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, if (GRAPHICS_VER(m->i915) <= 6) return; - for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) - err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", - slice, subslice, - ee->instdone.sampler[slice][subslice]); + if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 50)) { + for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice) + err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, + ee->instdone.sampler[slice][subslice]); - for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) - err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", - slice, subslice, - ee->instdone.row[slice][subslice]); + for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice) + err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, + ee->instdone.row[slice][subslice]); + } else { + for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) + err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 
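The rewritten for_i915_gem_ww() above turns the loop-control flag into the error code itself: the body runs while err == -EDEADLK, and the fini helper either keeps -EDEADLK alive after a successful backoff (retry) or tears down and yields the final status. A compilable restatement, deliberately ignoring the backoff-failure path:

#include <errno.h>
#include <stdio.h>

static int attempts;

static int try_lock_all(void) /* stand-in for the ww-locked body */
{
	return ++attempts < 3 ? -EDEADLK : 0; /* succeeds on attempt 3 */
}

static int ww_fini(int err)
{
	if (err == -EDEADLK)
		return -EDEADLK; /* backed off; signal "go around again" */
	/* any other value: release locks, report the final result */
	return err;
}

#define for_ww(_err) \
	for ((_err) = -EDEADLK; (_err) == -EDEADLK; (_err) = ww_fini(_err))

int main(void)
{
	int err;

	for_ww(err)
		err = try_lock_all();

	printf("done after %d attempts, err = %d\n", attempts, err);
	return 0;
}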
0x%08x\n", + slice, subslice, + ee->instdone.sampler[slice][subslice]); + + for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) + err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, + ee->instdone.row[slice][subslice]); + } if (GRAPHICS_VER(m->i915) < 12) return; + if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) { + for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice) + err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n", + slice, subslice, + ee->instdone.geom_svg[slice][subslice]); + } + err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", ee->instdone.slice_common_extra[0]); err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", @@ -594,7 +615,7 @@ static void print_error_vma(struct drm_i915_error_state_buf *m, const struct i915_vma_coredump *vma) { char out[ASCII85_BUFSZ]; - int page; + struct page *page; if (!vma) return; @@ -608,16 +629,17 @@ static void print_error_vma(struct drm_i915_error_state_buf *m, err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); err_compression_marker(m); - for (page = 0; page < vma->page_count; page++) { + list_for_each_entry(page, &vma->page_list, lru) { int i, len; + const u32 *addr = page_address(page); len = PAGE_SIZE; - if (page == vma->page_count - 1) + if (page == list_last_entry(&vma->page_list, typeof(*page), lru)) len -= vma->unused; len = ascii85_encode_len(len); for (i = 0; i < len; i++) - err_puts(m, ascii85_encode(vma->pages[page][i], out)); + err_puts(m, ascii85_encode(addr[i], out)); } err_puts(m, "\n"); } @@ -733,7 +755,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m, * only exists if the corresponding VCS engine is * present. */ - if (!HAS_ENGINE(gt->_gt, _VCS(i * 2))) + if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 || + !HAS_ENGINE(gt->_gt, _VCS(i * 2))) continue; err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, @@ -925,10 +948,12 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma) { while (vma) { struct i915_vma_coredump *next = vma->next; - int page; + struct page *page, *n; - for (page = 0; page < vma->page_count; page++) - free_page((unsigned long)vma->pages[page]); + list_for_each_entry_safe(page, n, &vma->page_list, lru) { + list_del_init(&page->lru); + __free_page(page); + } kfree(vma); vma = next; @@ -988,25 +1013,21 @@ void __i915_gpu_coredump_free(struct kref *error_ref) static struct i915_vma_coredump * i915_vma_coredump_create(const struct intel_gt *gt, - const struct i915_vma *vma, - const char *name, + const struct i915_vma_snapshot *vsnap, struct i915_vma_compress *compress) { struct i915_ggtt *ggtt = gt->ggtt; const u64 slot = ggtt->error_capture.start; struct i915_vma_coredump *dst; - unsigned long num_pages; struct sgt_iter iter; int ret; might_sleep(); - if (!vma || !vma->pages || !compress) + if (!vsnap || !vsnap->pages || !compress) return NULL; - num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; - num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL); + dst = kmalloc(sizeof(*dst), ALLOW_FAIL); if (!dst) return NULL; @@ -1015,14 +1036,13 @@ i915_vma_coredump_create(const struct intel_gt *gt, return NULL; } - strcpy(dst->name, name); + INIT_LIST_HEAD(&dst->page_list); + strcpy(dst->name, vsnap->name); dst->next = NULL; - dst->gtt_offset = vma->node.start; - dst->gtt_size = vma->node.size; - dst->gtt_page_sizes = vma->page_sizes.gtt; - dst->num_pages = num_pages; - dst->page_count = 0; + dst->gtt_offset = vsnap->gtt_offset; + 
dst->gtt_size = vsnap->gtt_size; + dst->gtt_page_sizes = vsnap->page_sizes; dst->unused = 0; ret = -EINVAL; @@ -1030,7 +1050,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, void __iomem *s; dma_addr_t dma; - for_each_sgt_daddr(dma, iter, vma->pages) { + for_each_sgt_daddr(dma, iter, vsnap->pages) { mutex_lock(&ggtt->error_mutex); ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); @@ -1048,11 +1068,11 @@ i915_vma_coredump_create(const struct intel_gt *gt, if (ret) break; } - } else if (__i915_gem_object_is_lmem(vma->obj)) { - struct intel_memory_region *mem = vma->obj->mm.region; + } else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) { + struct intel_memory_region *mem = vsnap->mr; dma_addr_t dma; - for_each_sgt_daddr(dma, iter, vma->pages) { + for_each_sgt_daddr(dma, iter, vsnap->pages) { void __iomem *s; s = io_mapping_map_wc(&mem->iomap, @@ -1068,7 +1088,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, } else { struct page *page; - for_each_sgt_page(page, iter, vma->pages) { + for_each_sgt_page(page, iter, vsnap->pages) { void *s; drm_clflush_pages(&page, 1); @@ -1085,8 +1105,13 @@ i915_vma_coredump_create(const struct intel_gt *gt, } if (ret || compress_flush(compress, dst)) { - while (dst->page_count--) - pool_free(&compress->pool, dst->pages[dst->page_count]); + struct page *page, *n; + + list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) { + list_del_init(&page->lru); + pool_free(&compress->pool, page_address(page)); + } + kfree(dst); dst = NULL; } @@ -1299,38 +1324,72 @@ static bool record_context(struct i915_gem_context_coredump *e, struct intel_engine_capture_vma { struct intel_engine_capture_vma *next; - struct i915_vma *vma; + struct i915_vma_snapshot *vsnap; char name[16]; + bool lockdep_cookie; }; static struct intel_engine_capture_vma * -capture_vma(struct intel_engine_capture_vma *next, - struct i915_vma *vma, - const char *name, - gfp_t gfp) +capture_vma_snapshot(struct intel_engine_capture_vma *next, + struct i915_vma_snapshot *vsnap, + gfp_t gfp) { struct intel_engine_capture_vma *c; - if (!vma) + if (!i915_vma_snapshot_present(vsnap)) return next; c = kmalloc(sizeof(*c), gfp); if (!c) return next; - if (!i915_active_acquire_if_busy(&vma->active)) { + if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) { kfree(c); return next; } - strcpy(c->name, name); - c->vma = vma; /* reference held while active */ + strcpy(c->name, vsnap->name); + c->vsnap = vsnap; + i915_vma_snapshot_get(vsnap); c->next = next; return c; } static struct intel_engine_capture_vma * +capture_vma(struct intel_engine_capture_vma *next, + struct i915_vma *vma, + const char *name, + gfp_t gfp) +{ + struct i915_vma_snapshot *vsnap; + + if (!vma) + return next; + + /* + * If the vma isn't pinned, then the vma should be snapshotted + * to a struct i915_vma_snapshot at command submission time. + * Not here. + */ + GEM_WARN_ON(!i915_vma_is_pinned(vma)); + if (!i915_vma_is_pinned(vma)) + return next; + + vsnap = i915_vma_snapshot_alloc(gfp); + if (!vsnap) + return next; + + i915_vma_snapshot_init(vsnap, vma, name); + next = capture_vma_snapshot(next, vsnap, gfp); + + /* FIXME: Replace on async unbind. 
*/ + i915_vma_snapshot_put(vsnap); + + return next; +} + +static struct intel_engine_capture_vma * capture_user(struct intel_engine_capture_vma *capture, const struct i915_request *rq, gfp_t gfp) @@ -1338,7 +1397,7 @@ capture_user(struct intel_engine_capture_vma *capture, struct i915_capture_list *c; for (c = rq->capture_list; c; c = c->next) - capture = capture_vma(capture, c->vma, "user", gfp); + capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp); return capture; } @@ -1352,6 +1411,36 @@ static void add_vma(struct intel_engine_coredump *ee, } } +static struct i915_vma_coredump * +create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma, + const char *name, struct i915_vma_compress *compress) +{ + struct i915_vma_coredump *ret = NULL; + struct i915_vma_snapshot tmp; + bool lockdep_cookie; + + if (!vma) + return NULL; + + i915_vma_snapshot_init_onstack(&tmp, vma, name); + if (i915_vma_snapshot_resource_pin(&tmp, &lockdep_cookie)) { + ret = i915_vma_coredump_create(gt, &tmp, compress); + i915_vma_snapshot_resource_unpin(&tmp, lockdep_cookie); + } + i915_vma_snapshot_put_onstack(&tmp); + + return ret; +} + +static void add_vma_coredump(struct intel_engine_coredump *ee, + const struct intel_gt *gt, + struct i915_vma *vma, + const char *name, + struct i915_vma_compress *compress) +{ + add_vma(ee, create_vma_coredump(gt, vma, name, compress)); +} + struct intel_engine_coredump * intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) { @@ -1385,7 +1474,7 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee, * as the simplest method to avoid being overwritten * by userspace. */ - vma = capture_vma(vma, rq->batch, "batch", gfp); + vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp); vma = capture_user(vma, rq, gfp); vma = capture_vma(vma, rq->ring->vma, "ring", gfp); vma = capture_vma(vma, rq->context->state, "HW context", gfp); @@ -1406,30 +1495,24 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, while (capture) { struct intel_engine_capture_vma *this = capture; - struct i915_vma *vma = this->vma; + struct i915_vma_snapshot *vsnap = this->vsnap; add_vma(ee, i915_vma_coredump_create(engine->gt, - vma, this->name, - compress)); + vsnap, compress)); - i915_active_release(&vma->active); + i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie); + i915_vma_snapshot_put(vsnap); capture = this->next; kfree(this); } - add_vma(ee, - i915_vma_coredump_create(engine->gt, - engine->status_page.vma, - "HW Status", - compress)); + add_vma_coredump(ee, engine->gt, engine->status_page.vma, + "HW Status", compress); - add_vma(ee, - i915_vma_coredump_create(engine->gt, - engine->wa_ctx.vma, - "WA context", - compress)); + add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma, + "WA context", compress); } static struct intel_engine_coredump * @@ -1465,17 +1548,25 @@ capture_engine(struct intel_engine_cs *engine, } } if (rq) - capture = intel_engine_coredump_add_request(ee, rq, - ATOMIC_MAYFAIL); + rq = i915_request_get_rcu(rq); + + if (!rq) + goto no_request_capture; + + capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL); if (!capture) { -no_request_capture: - kfree(ee); - return NULL; + i915_request_put(rq); + goto no_request_capture; } intel_engine_coredump_add_vma(ee, capture, compress); + i915_request_put(rq); return ee; + +no_request_capture: + kfree(ee); + return NULL; } static void @@ -1529,10 +1620,8 @@ gt_record_uc(struct intel_gt_coredump *gt, */ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); 
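The i915_gpu_coredump() split just below is the classic lock-wrapper refactor: the body keeps its old logic under a __-prefixed name, and the exported entry point only adds a mutex so concurrent captures serialize. A userspace analogue:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t capture_mutex = PTHREAD_MUTEX_INITIALIZER;

static int __coredump(void) /* unlocked worker, logic unchanged */
{
	printf("capturing device state...\n");
	return 0;
}

static int coredump(void) /* public wrapper: one capture at a time */
{
	int ret;

	pthread_mutex_lock(&capture_mutex);
	ret = __coredump();
	pthread_mutex_unlock(&capture_mutex);
	return ret;
}

int main(void)
{
	return coredump();
}

The kernel version additionally uses mutex_lock_interruptible() and returns ERR_PTR on signal; the sketch drops that detail.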
error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); - error_uc->guc_log = - i915_vma_coredump_create(gt->_gt, - uc->guc.log.vma, "GuC log buffer", - compress); + error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma, + "GuC log buffer", compress); return error_uc; } @@ -1612,7 +1701,8 @@ static void gt_record_regs(struct intel_gt_coredump *gt) * only exists if the corresponding VCS engine is * present. */ - if (!HAS_ENGINE(gt->_gt, _VCS(i * 2))) + if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 || + !HAS_ENGINE(gt->_gt, _VCS(i * 2))) continue; gt->sfc_done[i] = @@ -1728,10 +1818,7 @@ static void capture_gen(struct i915_gpu_coredump *error) error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); error->suspended = i915->runtime_pm.suspended; - error->iommu = -1; -#ifdef CONFIG_INTEL_IOMMU - error->iommu = intel_iommu_gfx_mapped; -#endif + error->iommu = intel_vtd_active(i915); error->reset_count = i915_reset_count(&i915->gpu_error); error->suspend_count = i915->suspend_count; @@ -1817,8 +1904,8 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt, kfree(compress); } -struct i915_gpu_coredump * -i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) +static struct i915_gpu_coredump * +__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) { struct drm_i915_private *i915 = gt->i915; struct i915_gpu_coredump *error; @@ -1859,6 +1946,22 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) return error; } +struct i915_gpu_coredump * +i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) +{ + static DEFINE_MUTEX(capture_mutex); + int ret = mutex_lock_interruptible(&capture_mutex); + struct i915_gpu_coredump *dump; + + if (ret) + return ERR_PTR(ret); + + dump = __i915_gpu_coredump(gt, engine_mask); + mutex_unlock(&capture_mutex); + + return dump; +} + void i915_error_state_store(struct i915_gpu_coredump *error) { struct drm_i915_private *i915; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index b98d8cdbe4f2..5aedf5129814 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -39,10 +39,8 @@ struct i915_vma_coredump { u64 gtt_size; u32 gtt_page_sizes; - int num_pages; - int page_count; int unused; - u32 *pages[]; + struct list_head page_list; }; struct i915_request_coredump { diff --git a/drivers/gpu/drm/i915/i915_iosf_mbi.h b/drivers/gpu/drm/i915/i915_iosf_mbi.h new file mode 100644 index 000000000000..8f81b7603d37 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_iosf_mbi.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_IOSF_MBI_H__ +#define __I915_IOSF_MBI_H__ + +#if IS_ENABLED(CONFIG_IOSF_MBI) +#include <asm/iosf_mbi.h> +#else + +/* Stubs to compile for all non-x86 archs */ +#define MBI_PMIC_BUS_ACCESS_BEGIN 1 +#define MBI_PMIC_BUS_ACCESS_END 2 + +struct notifier_block; + +static inline void iosf_mbi_punit_acquire(void) {} +static inline void iosf_mbi_punit_release(void) {} +static inline void iosf_mbi_assert_punit_acquired(void) {} + +static inline +int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int +iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb) +{ + return 0; +} + +static inline +int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb) +{ + return 0; +} +#endif + +#endif /* __I915_IOSF_MBI_H__ */ diff --git 
a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9bc4f4a8e12e..5b98fb0532b5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -35,6 +35,7 @@ #include <drm/drm_drv.h> #include "display/intel_de.h" +#include "display/intel_display_trace.h" #include "display/intel_display_types.h" #include "display/intel_fifo_underrun.h" #include "display/intel_hotplug.h" @@ -49,7 +50,6 @@ #include "i915_drv.h" #include "i915_irq.h" -#include "i915_trace.h" #include "intel_pm.h" /** @@ -224,7 +224,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) static void intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); drm_crtc_handle_vblank(&crtc->base); } @@ -359,9 +359,8 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void ilk_update_display_irq(struct drm_i915_private *dev_priv, - u32 interrupt_mask, - u32 enabled_irq_mask) +static void ilk_update_display_irq(struct drm_i915_private *dev_priv, + u32 interrupt_mask, u32 enabled_irq_mask) { u32 new_val; @@ -380,6 +379,16 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, } } +void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) +{ + ilk_update_display_irq(i915, bits, bits); +} + +void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) +{ + ilk_update_display_irq(i915, bits, 0); +} + /** * bdw_update_port_irq - update DE port interrupt * @dev_priv: driver private @@ -419,10 +428,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, - u32 interrupt_mask, - u32 enabled_irq_mask) +static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 interrupt_mask, + u32 enabled_irq_mask) { u32 new_val; @@ -444,15 +452,27 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, } } +void bdw_enable_pipe_irq(struct drm_i915_private *i915, + enum pipe pipe, u32 bits) +{ + bdw_update_pipe_irq(i915, pipe, bits, bits); +} + +void bdw_disable_pipe_irq(struct drm_i915_private *i915, + enum pipe pipe, u32 bits) +{ + bdw_update_pipe_irq(i915, pipe, bits, 0); +} + /** * ibx_display_interrupt_update - update SDEIMR * @dev_priv: driver private * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - u32 interrupt_mask, - u32 enabled_irq_mask) +static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + u32 interrupt_mask, + u32 enabled_irq_mask) { u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); sdeimr &= ~interrupt_mask; @@ -469,6 +489,16 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); } +void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) +{ + ibx_display_interrupt_update(i915, bits, bits); +} + +void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) +{ + ibx_display_interrupt_update(i915, bits, 0); +} + u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, enum 
pipe pipe) { @@ -1288,7 +1318,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, u32 crc2, u32 crc3, u32 crc4) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; @@ -1327,7 +1357,7 @@ display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, static void flip_done_handler(struct drm_i915_private *i915, enum pipe pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe); + struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); struct drm_crtc_state *crtc_state = crtc->base.state; struct drm_pending_vblank_event *e = crtc_state->event; struct drm_device *dev = &i915->drm; @@ -2093,22 +2123,6 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, if (de_iir & DE_ERR_INT_IVB) ivb_err_int_handler(dev_priv); - if (de_iir & DE_EDP_PSR_INT_HSW) { - struct intel_encoder *encoder; - - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - u32 psr_iir = intel_uncore_read(&dev_priv->uncore, - EDP_PSR_IIR); - - intel_psr_irq_handler(intel_dp, psr_iir); - intel_uncore_write(&dev_priv->uncore, - EDP_PSR_IIR, psr_iir); - break; - } - } - if (de_iir & DE_AUX_CHANNEL_A_IVB) dp_aux_irq_handler(dev_priv); @@ -2758,7 +2772,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; struct intel_gt *gt = &i915->gt; - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 master_tile_ctl, master_ctl; u32 gu_misc_iir; @@ -3002,7 +3016,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) if (IS_CHERRYVIEW(dev_priv)) intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else - intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); @@ -3159,11 +3173,12 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; gen11_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(&dev_priv->gt); + gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); @@ -3172,11 +3187,12 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) static void dg1_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; dg1_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(&dev_priv->gt); + gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); @@ -3855,13 +3871,14 @@ static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) 
icp_irq_postinstall(dev_priv); - gen11_gt_irq_postinstall(&dev_priv->gt); + gen11_gt_irq_postinstall(gt); gen11_de_irq_postinstall(dev_priv); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); @@ -3872,10 +3889,11 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; - gen11_gt_irq_postinstall(&dev_priv->gt); + gen11_gt_irq_postinstall(gt); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); @@ -3886,8 +3904,8 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) GEN11_DISPLAY_IRQ_ENABLE); } - dg1_master_intr_enable(dev_priv->uncore.regs); - intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR); + dg1_master_intr_enable(uncore->regs); + intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR); } static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) @@ -4331,6 +4349,20 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) return ret; } +#define HPD_FUNCS(platform) \ +static const struct intel_hotplug_funcs platform##_hpd_funcs = { \ + .hpd_irq_setup = platform##_hpd_irq_setup, \ +} + +HPD_FUNCS(i915); +HPD_FUNCS(dg1); +HPD_FUNCS(gen11); +HPD_FUNCS(bxt); +HPD_FUNCS(icp); +HPD_FUNCS(spt); +HPD_FUNCS(ilk); +#undef HPD_FUNCS + /** * intel_irq_init - initializes irq support * @dev_priv: i915 device instance @@ -4381,20 +4413,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (HAS_GMCH(dev_priv)) { if (I915_HAS_HOTPLUG(dev_priv)) - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + dev_priv->hotplug_funcs = &i915_hpd_funcs; } else { if (HAS_PCH_DG1(dev_priv)) - dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup; + dev_priv->hotplug_funcs = &dg1_hpd_funcs; else if (DISPLAY_VER(dev_priv) >= 11) - dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; + dev_priv->hotplug_funcs = &gen11_hpd_funcs; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; + dev_priv->hotplug_funcs = &bxt_hpd_funcs; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup; + dev_priv->hotplug_funcs = &icp_hpd_funcs; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; + dev_priv->hotplug_funcs = &spt_hpd_funcs; else - dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; + dev_priv->hotplug_funcs = &ilk_hpd_funcs; } } diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index e43b6734f21b..0eb90d271fa7 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -9,9 +9,9 @@ #include <linux/ktime.h> #include <linux/types.h> -#include "display/intel_display.h" #include "i915_reg.h" +enum pipe; struct drm_crtc; struct drm_device; struct drm_display_mode; @@ -40,46 +40,15 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, u32 mask, u32 bits); -void ilk_update_display_irq(struct drm_i915_private *dev_priv, - u32 interrupt_mask, - u32 enabled_irq_mask); -static inline void -ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits) -{ - ilk_update_display_irq(dev_priv, bits, bits); -} -static inline void -ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 
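The HPD_FUNCS() generator just below stamps out one const hotplug table per platform, and intel_irq_init() then picks exactly one. The shape of it, reduced to two fake platforms and compilable stand-alone:

#include <stdio.h>

struct hotplug_funcs {
	void (*hpd_irq_setup)(void);
};

static void ilk_hpd_irq_setup(void) { printf("ilk hpd setup\n"); }
static void bxt_hpd_irq_setup(void) { printf("bxt hpd setup\n"); }

#define HPD_FUNCS(platform) \
static const struct hotplug_funcs platform##_hpd_funcs = { \
	.hpd_irq_setup = platform##_hpd_irq_setup, \
}

HPD_FUNCS(ilk);
HPD_FUNCS(bxt);

int main(void)
{
	int is_broxton = 1; /* stand-in for platform detection */
	const struct hotplug_funcs *funcs =
		is_broxton ? &bxt_hpd_funcs : &ilk_hpd_funcs;

	funcs->hpd_irq_setup();
	return 0;
}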
bits) -{ - ilk_update_display_irq(dev_priv, bits, 0); -} -void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, - u32 interrupt_mask, - u32 enabled_irq_mask); -static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 bits) -{ - bdw_update_pipe_irq(dev_priv, pipe, bits, bits); -} -static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 bits) -{ - bdw_update_pipe_irq(dev_priv, pipe, bits, 0); -} -void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, - u32 interrupt_mask, - u32 enabled_irq_mask); -static inline void -ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) -{ - ibx_display_interrupt_update(dev_priv, bits, bits); -} -static inline void -ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) -{ - ibx_display_interrupt_update(dev_priv, bits, 0); -} + +void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits); +void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits); + +void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); +void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); + +void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits); +void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits); void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 666808cb3a32..7998bc74ab49 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -27,6 +27,7 @@ #include "i915_drv.h" +#include "i915_mm.h" struct remap_pfn { struct mm_struct *mm; @@ -37,17 +38,6 @@ struct remap_pfn { resource_size_t iobase; }; -static int remap_pfn(pte_t *pte, unsigned long addr, void *data) -{ - struct remap_pfn *r = data; - - /* Special PTE are not associated with any struct page */ - set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); - r->pfn++; - - return 0; -} - #define use_dma(io) ((io) != -1) static inline unsigned long sgt_pfn(const struct remap_pfn *r) @@ -77,6 +67,20 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data) return 0; } +#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) + +#if IS_ENABLED(CONFIG_X86) +static int remap_pfn(pte_t *pte, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + /* Special PTE are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + + return 0; +} + /** * remap_io_mapping - remap an IO mapping to userspace * @vma: user vma to map to @@ -94,7 +98,6 @@ int remap_io_mapping(struct vm_area_struct *vma, struct remap_pfn r; int err; -#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); /* We rely on prevalidation of the io-mapping to skip track_pfn(). 
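The i915_irq.h hunk above is the matching header diet: the mask-combining update helpers stop being static inlines visible to every includer and become two plain exported functions each, with the core helper now private to i915_irq.c. In miniature — toy mask register, not real hardware semantics:

#include <stdio.h>

/* header half: only thin, stable entry points remain */
void irq_enable(unsigned int bits);
void irq_disable(unsigned int bits);

/* .c half: the combining logic is no longer the header's business */
static unsigned int enabled_mask;

static void irq_update(unsigned int mask, unsigned int enabled)
{
	enabled_mask = (enabled_mask & ~mask) | enabled;
}

void irq_enable(unsigned int bits) { irq_update(bits, bits); }
void irq_disable(unsigned int bits) { irq_update(bits, 0); }

int main(void)
{
	irq_enable(0x3);
	irq_disable(0x1);
	printf("enabled_mask = 0x%x\n", enabled_mask); /* 0x2 */
	return 0;
}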
*/ @@ -111,6 +114,7 @@ int remap_io_mapping(struct vm_area_struct *vma, return 0; } +#endif /** * remap_io_sg - remap an IO mapping to userspace diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h new file mode 100644 index 000000000000..76f1d53bdf34 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mm.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_MM_H__ +#define __I915_MM_H__ + +#include <linux/types.h> + +struct vm_area_struct; +struct io_mapping; +struct scatterlist; + +#if IS_ENABLED(CONFIG_X86) +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap); +#else +static inline +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + pr_err("Architecture has no %s() and shouldn't be calling this function\n", __func__); + WARN_ON_ONCE(1); + return 0; +} +#endif + +int remap_io_sg(struct vm_area_struct *vma, + unsigned long addr, unsigned long size, + struct scatterlist *sgl, resource_size_t iobase); + +#endif /* __I915_MM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c index d8b4482c69d0..f6bcd2f89257 100644 --- a/drivers/gpu/drm/i915/i915_module.c +++ b/drivers/gpu/drm/i915/i915_module.c @@ -4,7 +4,7 @@ * Copyright © 2021 Intel Corporation */ -#include <linux/console.h> +#include <drm/drm_drv.h> #include "gem/i915_gem_context.h" #include "gem/i915_gem_object.h" @@ -24,14 +24,14 @@ static int i915_check_nomodeset(void) /* * Enable KMS by default, unless explicitly overriden by - * either the i915.modeset prarameter or by the - * vga_text_mode_force boot option. + * either the i915.modeset parameter or by the + * nomodeset boot option. 
*/ if (i915_modparams.modeset == 0) use_kms = false; - if (vgacon_text_force() && i915_modparams.modeset == -1) + if (drm_firmware_drivers_only() && i915_modparams.modeset == -1) use_kms = false; if (!use_kms) { @@ -67,8 +67,8 @@ static const struct { { .init = i915_mock_selftests }, { .init = i915_pmu_init, .exit = i915_pmu_exit }, - { .init = i915_register_pci_driver, - .exit = i915_unregister_pci_driver }, + { .init = i915_pci_register_driver, + .exit = i915_pci_unregister_driver }, { .init = i915_perf_sysctl_register, .exit = i915_perf_sysctl_unregister }, }; diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index f27eceb82c0f..8d725b64592d 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -55,7 +55,7 @@ struct drm_printer; param(int, enable_fbc, -1, 0600) \ param(int, enable_psr, -1, 0600) \ param(bool, psr_safest_params, false, 0400) \ - param(bool, enable_psr2_sel_fetch, false, 0400) \ + param(bool, enable_psr2_sel_fetch, true, 0400) \ param(int, disable_power_well, -1, 0400) \ param(int, enable_ips, 1, 0600) \ param(int, invert_brightness, 0, 0600) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 1bbd09ad5287..ae36dfd77dcf 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -22,18 +22,17 @@ * */ -#include <linux/vga_switcheroo.h> - #include <drm/drm_drv.h> #include <drm/i915_pciids.h> +#include "i915_driver.h" #include "i915_drv.h" #include "i915_pci.h" #define PLATFORM(x) .platform = (x) #define GEN(x) \ - .graphics_ver = (x), \ - .media_ver = (x), \ + .graphics.ver = (x), \ + .media.ver = (x), \ .display.ver = (x) #define I845_PIPE_OFFSETS \ @@ -145,6 +144,12 @@ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ DRM_COLOR_LUT_EQUAL_CHANNELS, \ } +#define ICL_COLORS \ + .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } /* Keep in gen based order, and chronological order within a gen */ @@ -157,8 +162,8 @@ #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -178,8 +183,8 @@ #define I845_FEATURES \ GEN(2), \ - .pipe_mask = BIT(PIPE_A), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A), \ + .display.pipe_mask = BIT(PIPE_A), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -220,8 +225,8 @@ static const struct intel_device_info i865g_info = { #define GEN3_FEATURES \ GEN(3), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .platform_engine_mask = BIT(RCS0), \ @@ -310,8 +315,8 @@ static const struct intel_device_info pnv_m_info = { #define GEN4_FEATURES \ GEN(4), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = 
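The i915_check_nomodeset() logic above folds a tri-state module parameter (-1 auto, 0 off, 1 on) together with the new global drm_firmware_drivers_only() test; note that an explicit i915.modeset=1 still beats nomodeset. The decision table, restated as compilable C:

#include <stdio.h>

static int use_kms(int modeset_param, int firmware_only)
{
	int kms = 1; /* KMS on by default */

	if (modeset_param == 0)
		kms = 0; /* explicitly disabled */
	if (firmware_only && modeset_param == -1)
		kms = 0; /* nomodeset honoured only in auto mode */
	return kms;
}

int main(void)
{
	printf("%d\n", use_kms(-1, 0)); /* 1: default */
	printf("%d\n", use_kms(-1, 1)); /* 0: nomodeset wins in auto */
	printf("%d\n", use_kms(1, 1));  /* 1: explicit modeset=1 overrides */
	return 0;
}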
BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ @@ -363,8 +368,8 @@ static const struct intel_device_info gm45_info = { #define GEN5_FEATURES \ GEN(5), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ @@ -393,8 +398,8 @@ static const struct intel_device_info ilk_m_info = { #define GEN6_FEATURES \ GEN(6), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -444,8 +449,8 @@ static const struct intel_device_info snb_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -499,8 +504,8 @@ static const struct intel_device_info ivb_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, - .pipe_mask = 0, /* legal, last one wins */ - .cpu_transcoder_mask = 0, + .display.pipe_mask = 0, /* legal, last one wins */ + .display.cpu_transcoder_mask = 0, .has_l3_dpf = 1, }; @@ -508,8 +513,8 @@ static const struct intel_device_info vlv_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_reset_engine = true, @@ -533,12 +538,10 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ - .display.has_psr = 1, \ - .display.has_psr_hw_tracking = 1, \ .display.has_dp_mst = 1, \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ HSW_PIPE_OFFSETS, \ @@ -605,8 +608,8 @@ static const struct intel_device_info bdw_gt3_info = { static const struct intel_device_info chv_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), @@ -642,6 +645,8 @@ static const struct intel_device_info chv_info = { 
.has_gt_uc = 1, \ .display.has_hdcp = 1, \ .display.has_ipc = 1, \ + .display.has_psr = 1, \ + .display.has_psr_hw_tracking = 1, \ .dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \ .dbuf.slice_mask = BIT(DBUF_S1) @@ -681,8 +686,8 @@ static const struct intel_device_info skl_gt4_info = { .dbuf.slice_mask = BIT(DBUF_S1), \ .display.has_hotplug = 1, \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \ .has_64bit_reloc = 1, \ @@ -790,8 +795,8 @@ static const struct intel_device_info cml_gt2_info = { #define GEN11_FEATURES \ GEN9_FEATURES, \ GEN11_DEFAULT_PAGE_SIZES, \ - .abox_mask = BIT(0), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.abox_mask = BIT(0), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ @@ -811,7 +816,7 @@ static const struct intel_device_info cml_gt2_info = { [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ GEN(11), \ - .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \ + ICL_COLORS, \ .dbuf.size = 2048, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ .display.has_dsc = 1, \ @@ -842,9 +847,9 @@ static const struct intel_device_info jsl_info = { #define GEN12_FEATURES \ GEN11_FEATURES, \ GEN(12), \ - .abox_mask = GENMASK(2, 1), \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ + .display.abox_mask = GENMASK(2, 1), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \ .pipe_offsets = { \ @@ -865,7 +870,8 @@ static const struct intel_device_info jsl_info = { }, \ TGL_CURSOR_OFFSETS, \ .has_global_mocs = 1, \ - .display.has_dsb = 1 + .has_pxp = 1, \ + .display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */ static const struct intel_device_info tgl_info = { GEN12_FEATURES, @@ -878,9 +884,9 @@ static const struct intel_device_info tgl_info = { static const struct intel_device_info rkl_info = { GEN12_FEATURES, PLATFORM(INTEL_ROCKETLAKE), - .abox_mask = BIT(0), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + .display.abox_mask = BIT(0), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, @@ -891,15 +897,16 @@ static const struct intel_device_info rkl_info = { #define DGFX_FEATURES \ .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \ .has_llc = 0, \ + .has_pxp = 0, \ .has_snoop = 1, \ .is_dgfx = 1 -static const struct intel_device_info dg1_info __maybe_unused = { +static const struct intel_device_info dg1_info = { GEN12_FEATURES, DGFX_FEATURES, - .graphics_rel = 10, + .graphics.rel = 10, PLATFORM(INTEL_DG1), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | 
BIT(PIPE_D), + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | @@ -911,8 +918,7 @@ static const struct intel_device_info dg1_info __maybe_unused = { static const struct intel_device_info adl_s_info = { GEN12_FEATURES, PLATFORM(INTEL_ALDERLAKE_S), - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .require_force_probe = 1, + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .display.has_hti = 1, .display.has_psr_hw_tracking = 0, .platform_engine_mask = @@ -929,10 +935,11 @@ static const struct intel_device_info adl_s_info = { } #define XE_LPD_FEATURES \ - .abox_mask = GENMASK(1, 0), \ - .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \ + .display.abox_mask = GENMASK(1, 0), \ + .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + }, \ .dbuf.size = 4096, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ BIT(DBUF_S4), \ @@ -948,18 +955,22 @@ static const struct intel_device_info adl_s_info = { .display.has_ipc = 1, \ .display.has_psr = 1, \ .display.ver = 13, \ - .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ + .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = PIPE_C_OFFSET, \ [TRANSCODER_D] = PIPE_D_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ XE_LPD_CURSOR_OFFSETS @@ -967,7 +978,9 @@ static const struct intel_device_info adl_p_info = { GEN12_FEATURES, XE_LPD_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), - .require_force_probe = 1, + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .display.has_cdclk_crawl = 1, .display.has_modular_fia = 1, .display.has_psr_hw_tracking = 0, @@ -985,8 +998,8 @@ static const struct intel_device_info adl_p_info = { I915_GTT_PAGE_SIZE_2M #define XE_HP_FEATURES \ - .graphics_ver = 12, \ - .graphics_rel = 50, \ + .graphics.ver = 12, \ + .graphics.rel = 50, \ XE_HP_PAGE_SIZES, \ .dma_mask_size = 46, \ .has_64bit_reloc = 1, \ @@ -1004,8 +1017,8 @@ static const struct intel_device_info adl_p_info = { .ppgtt_type = INTEL_PPGTT_FULL #define XE_HPM_FEATURES \ - .media_ver = 12, \ - .media_rel = 50 + .media.ver = 12, \ + .media.rel = 50 __maybe_unused static const struct intel_device_info xehpsdv_info = { @@ -1014,7 +1027,6 @@ static const struct intel_device_info xehpsdv_info = { DGFX_FEATURES, PLATFORM(INTEL_XEHPSDV), .display = { }, - .pipe_mask = 0, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | @@ -1029,14 +1041,16 @@ static const struct intel_device_info dg2_info = { XE_HPM_FEATURES, XE_LPD_FEATURES, DGFX_FEATURES, - .graphics_rel = 55, - .media_rel = 55, + .graphics.rel = 55, + .media.rel = 55, PLATFORM(INTEL_DG2), 
.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VCS0) | BIT(VCS2), .require_force_probe = 1, + .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D), }; #undef PLATFORM @@ -1115,6 +1129,8 @@ static const struct pci_device_id pciidlist[] = { INTEL_RKL_IDS(&rkl_info), INTEL_ADLS_IDS(&adl_s_info), INTEL_ADLP_IDS(&adl_p_info), + INTEL_DG1_IDS(&dg1_info), + INTEL_RPLS_IDS(&adl_s_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -1187,11 +1203,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (vga_switcheroo_client_probe_defer(pdev)) + /* Detect if we need to wait for other drivers early on */ + if (intel_modeset_probe_defer(pdev)) return -EPROBE_DEFER; err = i915_driver_probe(pdev, ent); @@ -1234,12 +1247,12 @@ static struct pci_driver i915_pci_driver = { .driver.pm = &i915_pm_ops, }; -int i915_register_pci_driver(void) +int i915_pci_register_driver(void) { return pci_register_driver(&i915_pci_driver); } -void i915_unregister_pci_driver(void) +void i915_pci_unregister_driver(void) { pci_unregister_driver(&i915_pci_driver); } diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h index b386f319f52e..ee048c238174 100644 --- a/drivers/gpu/drm/i915/i915_pci.h +++ b/drivers/gpu/drm/i915/i915_pci.h @@ -1,8 +1,12 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * * Copyright © 2021 Intel Corporation */ -int i915_register_pci_driver(void); -void i915_unregister_pci_driver(void); +#ifndef __I915_PCI_H__ +#define __I915_PCI_H__ + +int i915_pci_register_driver(void); +void i915_pci_unregister_driver(void); + +#endif /* __I915_PCI_H__ */ diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index e49da36c62fb..51b368be0fc4 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -124,7 +124,9 @@ query_engine_info(struct drm_i915_private *i915, for_each_uabi_engine(engine, i915) { info.engine.engine_class = engine->uabi_class; info.engine.engine_instance = engine->uabi_instance; + info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE; info.capabilities = engine->uabi_capabilities; + info.logical_instance = ilog2(engine->logical_mask); if (copy_to_user(info_ptr, &info, sizeof(info))) return -EFAULT; @@ -432,9 +434,6 @@ static int query_memregion_info(struct drm_i915_private *i915, u32 total_length; int ret, id, i; - if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) - return -ENODEV; - if (query_item->flags != 0) return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 664970f2bc62..d27ba273cc68 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -371,6 +371,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VLV_G3DCTL _MMIO(0x9024) #define VLV_GSCKGCTL _MMIO(0x9028) +#define FBC_LLC_READ_CTRL _MMIO(0x9044) +#define FBC_LLC_FULLY_OPEN REG_BIT(30) + #define GEN6_MBCTL _MMIO(0x0907c) #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) @@ -498,6 +501,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOBITS_PPGTT_CACHE64B (3 << 8) #define ECOBITS_PPGTT_CACHE4B (0 << 8) +#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54) +#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12) 
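The GEN12_GAMCNTRL_CTRL bits just above (and the GAMSTLB/MERT bits that follow) are written with the REG_BIT() helper rather than open-coded shifts. Below is a minimal userspace sketch of what REG_BIT()/REG_GENMASK() compute, assuming 32-bit registers; the kernel's real definitions (<linux/bits.h> and i915_reg.h) additionally do compile-time argument checking, so these stand-ins are illustrative only:

#include <stdio.h>

/* illustrative stand-ins for the kernel's REG_BIT()/REG_GENMASK() */
#define REG_BIT(n)        (1u << (n))
#define REG_GENMASK(h, l) ((0xffffffffu << (l)) & (0xffffffffu >> (31 - (h))))

int main(void)
{
	/* GEN12_GAMCNTRL_CTRL: broadcast-mode disable (bit 12) + global mode (bit 2) */
	unsigned int val = REG_BIT(12) | REG_BIT(2);

	printf("GAMCNTRL bits      = 0x%08x\n", val);                 /* 0x00001004 */
	printf("REG_GENMASK(13, 7) = 0x%08x\n", REG_GENMASK(13, 7));  /* 0x00003f80 */
	return 0;
}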
+#define GLOBAL_INVALIDATION_MODE REG_BIT(2) + +#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c) +#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12) +#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11) +#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7) + +#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28) +#define FORCE_MISS_FTLB REG_BIT(3) + #define GAB_CTL _MMIO(0x24000) #define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8) @@ -719,6 +734,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_OA_TLB_INV_CR _MMIO(0xceec) +#define GEN12_SQCM _MMIO(0x8724) +#define EN_32B_ACCESS REG_BIT(30) + /* Gen12 OAR unit */ #define GEN12_OAR_OACONTROL _MMIO(0x2960) #define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 @@ -770,6 +788,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define EU_PERF_CNTL5 _MMIO(0xe55c) #define EU_PERF_CNTL6 _MMIO(0xe65c) +#define RT_CTRL _MMIO(0xe530) +#define DIS_NULL_QUERY REG_BIT(10) + /* * OA Boolean state */ @@ -1968,7 +1989,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) _ICL_PORT_PCS_LN(ln) + 4 * (dw)) #define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) #define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) -#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy)) +#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) #define DCC_MODE_SELECT_MASK (0x3 << 20) #define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) #define COMMON_KEEPER_EN (1 << 26) @@ -1989,7 +2010,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) #define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) -#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy)) +#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy)) #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) #define SWING_SEL_UPPER_MASK (1 << 15) #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) @@ -2001,7 +2022,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) #define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) -#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy)) #define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) @@ -2013,7 +2033,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) #define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) -#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy)) +#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy)) #define TX_TRAINING_EN (1 << 31) #define TAP2_DISABLE (1 << 30) #define TAP3_DISABLE (1 << 29) @@ -2024,14 +2044,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) #define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) -#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy)) #define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) #define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy)) #define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy)) -#define ICL_PORT_TX_DW8_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy)) +#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy)) #define 
ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31) #define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29) #define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1) @@ -2237,11 +2256,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004) #define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31) +#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30) #define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29) #define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26) #define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24) +#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16) #define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10) +#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9) +#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8) #define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5) +#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0) #define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008) #define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31) @@ -2551,6 +2575,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_HWS_PGA(base) _MMIO((base) + 0x80) #define RING_ID(base) _MMIO((base) + 0x8c) #define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080) + +#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4) +/* + * CMD_CCTL read/write fields take a MOCS value and _not_ a table index. + * The lsb of each can be considered a separate enabling bit for encryption. + * 6:0 == default MOCS value for reads => 6:1 == table index for reads. + * 13:7 == default MOCS value for writes => 13:8 == table index for writes. + * 15:14 == Reserved => 31:30 are set to 0. + */ +#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7) +#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0) +#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \ + CMD_CCTL_READ_OVERRIDE_MASK) +#define CMD_CCTL_MOCS_OVERRIDE(write, read) \ + (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \ + REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1)) + +#define BLIT_CCTL(base) _MMIO((base) + 0x204) +#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8) +#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0) +#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \ + BLIT_CCTL_SRC_MOCS_MASK) +#define BLIT_CCTL_MOCS(dst, src) \ + (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \ + REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1)) + #define RING_RESET_CTL(base) _MMIO((base) + 0xd0) #define RESET_CTL_CAT_ERROR REG_BIT(2) #define RESET_CTL_READY_TO_RESET REG_BIT(1) @@ -2634,6 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ +#define GUCPMTIMESTAMP _MMIO(0xC3E8) + /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ #define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) #define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) @@ -2686,6 +2738,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108) #define GEN7_SAMPLER_INSTDONE _MMIO(0xe160) #define GEN7_ROW_INSTDONE _MMIO(0xe164) +#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c) #define MCFG_MCR_SELECTOR _MMIO(0xfd0) #define SF_MCR_SELECTOR _MMIO(0xfd8) #define GEN8_MCR_SELECTOR _MMIO(0xfdc) @@ -2743,6 +2796,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10) #define IECPUNIT_CLKGATE_DIS REG_BIT(22) +#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18) +#define 
ALNUNIT_CLKGATE_DIS REG_BIT(13) + #define ERROR_GEN6 _MMIO(0x40a0) #define GEN7_ERR_INT _MMIO(0x44040) #define ERR_INT_POISON (1 << 31) @@ -2766,12 +2822,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) #define FPGA_DBG _MMIO(0x42300) -#define FPGA_DBG_RM_NOCLAIM (1 << 31) +#define FPGA_DBG_RM_NOCLAIM REG_BIT(31) #define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028) -#define CLAIM_ER_CLR (1 << 31) -#define CLAIM_ER_OVERFLOW (1 << 16) -#define CLAIM_ER_CTR_MASK 0xffff +#define CLAIM_ER_CLR REG_BIT(31) +#define CLAIM_ER_OVERFLOW REG_BIT(16) +#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0) #define DERRMR _MMIO(0x44050) /* Note that HBLANK events are reserved on bdw+ */ @@ -2820,6 +2876,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MI_MODE _MMIO(0x209c) # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 12) +# define TGL_NESTED_BB_EN (1 << 12) # define ASYNC_FLIP_PERF_DISABLE (1 << 14) # define MODE_IDLE (1 << 9) # define STOP_RING (1 << 8) @@ -2840,6 +2897,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) #define GEN11_ENABLE_32_PLANE_MODE (1 << 7) +#define SCCGCTL94DC _MMIO(0x94dc) +#define CG3DDISURB REG_BIT(14) + +#define MLTICTXCTL _MMIO(0xb170) +#define TDONRENDER REG_BIT(2) + +#define L3SQCREG1_CCS0 _MMIO(0xb200) +#define FLUSHALLNONCOH REG_BIT(5) + /* WaClearTdlStateAckDirtyBits */ #define GEN8_STATE_ACK _MMIO(0x20F0) #define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) @@ -3076,12 +3142,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) #define GEN10_CACHE_MODE_SS _MMIO(0xe420) -#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4) +#define ENABLE_PREFETCH_INTO_IC REG_BIT(3) +#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4) /* Fuse readout registers for GT */ #define HSW_PAVP_FUSE1 _MMIO(0x911C) -#define HSW_F1_EU_DIS_SHIFT 16 -#define HSW_F1_EU_DIS_MASK (0x3 << HSW_F1_EU_DIS_SHIFT) +#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24) +#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16) #define HSW_F1_EU_DIS_10EUS 0 #define HSW_F1_EU_DIS_8EUS 1 #define HSW_F1_EU_DIS_6EUS 2 @@ -3150,7 +3217,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C) -#define GEN12_GT_DSS_ENABLE _MMIO(0x913C) +#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913C) +#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144) #define XEHP_EU_ENABLE _MMIO(0x9134) #define XEHP_EU_ENA_MASK 0xFF @@ -3276,89 +3344,98 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ #define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ #define FBC_CONTROL _MMIO(0x3208) -#define FBC_CTL_EN REG_BIT(31) -#define FBC_CTL_PERIODIC REG_BIT(30) -#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) -#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) -#define FBC_CTL_STOP_ON_MOD REG_BIT(15) -#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ -#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */ -#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) -#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) -#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) -#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) +#define FBC_CTL_EN REG_BIT(31) +#define FBC_CTL_PERIODIC REG_BIT(30) +#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) +#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) +#define FBC_CTL_STOP_ON_MOD 
REG_BIT(15) +#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ +#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */ +#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) +#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) +#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) #define FBC_COMMAND _MMIO(0x320c) -#define FBC_CMD_COMPRESS (1 << 0) +#define FBC_CMD_COMPRESS REG_BIT(0) #define FBC_STATUS _MMIO(0x3210) -#define FBC_STAT_COMPRESSING (1 << 31) -#define FBC_STAT_COMPRESSED (1 << 30) -#define FBC_STAT_MODIFIED (1 << 29) -#define FBC_STAT_CURRENT_LINE_SHIFT (0) -#define FBC_CONTROL2 _MMIO(0x3214) -#define FBC_CTL_FENCE_DBL (0 << 4) -#define FBC_CTL_IDLE_IMM (0 << 2) -#define FBC_CTL_IDLE_FULL (1 << 2) -#define FBC_CTL_IDLE_LINE (2 << 2) -#define FBC_CTL_IDLE_DEBUG (3 << 2) -#define FBC_CTL_CPU_FENCE (1 << 1) -#define FBC_CTL_PLANE(plane) ((plane) << 0) -#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */ -#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) +#define FBC_STAT_COMPRESSING REG_BIT(31) +#define FBC_STAT_COMPRESSED REG_BIT(30) +#define FBC_STAT_MODIFIED REG_BIT(29) +#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0) +#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */ +#define FBC_CTL_FENCE_DBL REG_BIT(4) +#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2) +#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0) +#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1) +#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2) +#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3) +#define FBC_CTL_CPU_FENCE_EN REG_BIT(1) +#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0) +#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane)) +#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */ +#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */ +#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1) +#define FBC_MOD_NUM_VALID REG_BIT(0) +#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */ +#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */ +#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0) +#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1) +#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2) +#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3) #define FBC_LL_SIZE (1536) -#define FBC_LLC_READ_CTRL _MMIO(0x9044) -#define FBC_LLC_FULLY_OPEN (1 << 30) - /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE _MMIO(0x3200) +#define ILK_DPFC_CB_BASE _MMIO(0x43200) #define DPFC_CONTROL _MMIO(0x3208) -#define DPFC_CTL_EN (1 << 31) -#define DPFC_CTL_PLANE(plane) ((plane) << 30) -#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29) -#define DPFC_CTL_FENCE_EN (1 << 29) -#define IVB_DPFC_CTL_FENCE_EN (1 << 28) -#define DPFC_CTL_PERSISTENT_MODE (1 << 25) -#define DPFC_SR_EN (1 << 10) -#define DPFC_CTL_LIMIT_1X (0 << 6) -#define DPFC_CTL_LIMIT_2X (1 << 6) -#define DPFC_CTL_LIMIT_4X (2 << 6) +#define ILK_DPFC_CONTROL _MMIO(0x43208) +#define DPFC_CTL_EN REG_BIT(31) +#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ +#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */ +#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */ +#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */ +#define
DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */ +#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */ +#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */ +#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */ +#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6) +#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0) +#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1) +#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) +#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) #define DPFC_RECOMP_CTL _MMIO(0x320c) -#define DPFC_RECOMP_STALL_EN (1 << 27) -#define DPFC_RECOMP_STALL_WM_SHIFT (16) -#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) -#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) -#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) +#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) +#define DPFC_RECOMP_STALL_EN REG_BIT(27) +#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) +#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) #define DPFC_STATUS _MMIO(0x3210) -#define DPFC_INVAL_SEG_SHIFT (16) -#define DPFC_INVAL_SEG_MASK (0x07ff0000) -#define DPFC_COMP_SEG_SHIFT (0) -#define DPFC_COMP_SEG_MASK (0x000007ff) +#define ILK_DPFC_STATUS _MMIO(0x43210) +#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) +#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0) #define DPFC_STATUS2 _MMIO(0x3214) +#define ILK_DPFC_STATUS2 _MMIO(0x43214) +#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0) #define DPFC_FENCE_YOFF _MMIO(0x3218) -#define DPFC_CHICKEN _MMIO(0x3224) -#define DPFC_HT_MODIFY (1 << 31) - -/* Framebuffer compression for Ironlake */ -#define ILK_DPFC_CB_BASE _MMIO(0x43200) -#define ILK_DPFC_CONTROL _MMIO(0x43208) -#define FBC_CTL_FALSE_COLOR (1 << 10) -/* The bit 28-8 is reserved */ -#define DPFC_RESERVED (0x1FFFFF00) -#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) -#define ILK_DPFC_STATUS _MMIO(0x43210) -#define ILK_DPFC_COMP_SEG_MASK 0x7ff -#define IVB_FBC_STATUS2 _MMIO(0x43214) -#define IVB_FBC_COMP_SEG_MASK 0x7ff -#define BDW_FBC_COMP_SEG_MASK 0xfff #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) +#define DPFC_CHICKEN _MMIO(0x3224) #define ILK_DPFC_CHICKEN _MMIO(0x43224) -#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8) -#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14) -#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23) +#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ +#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ +#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ +#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ + +#define GLK_FBC_STRIDE _MMIO(0x43228) +#define FBC_STRIDE_OVERRIDE REG_BIT(15) +#define FBC_STRIDE_MASK REG_GENMASK(14, 0) +#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) + #define ILK_FBC_RT_BASE _MMIO(0x2128) -#define ILK_FBC_RT_VALID (1 << 0) -#define SNB_FBC_FRONT_BUFFER (1 << 1) +#define ILK_FBC_RT_VALID REG_BIT(0) +#define SNB_FBC_FRONT_BUFFER REG_BIT(1) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS (1 << 22) @@ -3382,8 +3459,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * The following two registers are of type GTTMMADR */ #define SNB_DPFC_CTL_SA _MMIO(0x100100) -#define SNB_CPU_FENCE_ENABLE (1 << 29) -#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) +#define SNB_DPFC_FENCE_EN REG_BIT(29) +#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0) +#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence)) +#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) /* Framebuffer compression for Ivybridge */ #define 
IVB_FBC_RT_BASE _MMIO(0x7020) @@ -3393,8 +3472,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IPS_ENABLE (1 << 31) #define MSG_FBC_REND_STATE _MMIO(0x50380) -#define FBC_REND_NUKE (1 << 2) -#define FBC_REND_CACHE_CLEAN (1 << 1) +#define FBC_REND_NUKE REG_BIT(2) +#define FBC_REND_CACHE_CLEAN REG_BIT(1) /* * GPIO regs @@ -4113,6 +4192,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RPN_CAP_MASK REG_GENMASK(23, 16) #define BXT_RP_STATE_CAP _MMIO(0x138170) #define GEN9_RP_STATE_LIMITS _MMIO(0x138148) +#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014) /* * Logical Context regs @@ -4231,6 +4311,7 @@ enum { #define DUPS1_GATING_DIS (1 << 15) #define DUPS2_GATING_DIS (1 << 19) #define DUPS3_GATING_DIS (1 << 23) +#define CURSOR_GATING_DIS REG_BIT(28) #define DPF_GATING_DIS (1 << 10) #define DPF_RAM_GATING_DIS (1 << 9) #define DPFR_GATING_DIS (1 << 8) @@ -4241,21 +4322,62 @@ enum { /* * GEN10 clock gating regs */ + +#define UNSLCGCTL9440 _MMIO(0x9440) +#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28) +#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27) +#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26) +#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24) +#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23) +#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22) +#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21) +#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17) +#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16) +#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15) +#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14) +#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6) + +#define UNSLCGCTL9444 _MMIO(0x9444) +#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30) +#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29) +#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28) +#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27) +#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26) +#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25) +#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24) +#define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23) +#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22) +#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21) +#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20) +#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19) +#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18) +#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17) +#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) +#define LTCDD_CLKGATE_DIS REG_BIT(10) + #define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) #define SARBUNIT_CLKGATE_DIS (1 << 5) #define RCCUNIT_CLKGATE_DIS (1 << 7) #define MSCUNIT_CLKGATE_DIS (1 << 10) +#define NODEDSS_CLKGATE_DIS REG_BIT(12) #define L3_CLKGATE_DIS REG_BIT(16) #define L3_CR2X_CLKGATE_DIS REG_BIT(17) #define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524) -#define GWUNIT_CLKGATE_DIS (1 << 16) +#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28) +#define GWUNIT_CLKGATE_DIS REG_BIT(16) #define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528) #define CPSSUNIT_CLKGATE_DIS REG_BIT(9) +#define SSMCGCTL9530 _MMIO(0x9530) +#define RTFUNIT_CLKGATE_DIS REG_BIT(18) + #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) #define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */ +#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */ +#define GAMEDIA_CLKGATE_DIS REG_BIT(11) #define HSUNIT_CLKGATE_DIS REG_BIT(8) #define VSUNIT_CLKGATE_DIS REG_BIT(3) @@ -4272,47 +4394,52 @@ enum { /* Pipe A CRC regs */ #define _PIPE_CRC_CTL_A 0x60050 -#define PIPE_CRC_ENABLE (1 << 31) +#define PIPE_CRC_ENABLE REG_BIT(31) /* skl+ source selection */ -#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28) -#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28) -#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28) 
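The FBC/DPFC hunks above (and the pipe CRC hunk that continues below) systematically replace open-coded (value << shift) encodings with REG_FIELD_PREP() against a named mask. A small self-contained check, using a userspace stand-in for REG_FIELD_PREP() (the kernel builds it on FIELD_PREP() with stricter type checking, so this is a sketch, not the kernel macro), that the conversion preserves the old raw register values:

#include <assert.h>

#define REG_GENMASK(h, l)         ((0xffffffffu << (l)) & (0xffffffffu >> (31 - (h))))
/* __builtin_ctz() of the mask gives the field's shift (its lowest set bit) */
#define REG_FIELD_PREP(mask, val) (((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

#define FBC_CTL_IDLE_MASK   REG_GENMASK(3, 2)
#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)

int main(void)
{
	/* old encodings: FBC_CTL_IDLE_LINE (2 << 2), DPFC_CTL_LIMIT_2X (1 << 6) */
	assert(REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2) == (2 << 2));
	assert(REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1) == (1 << 6));
	return 0;
}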
-#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28) -#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28) -#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28) -#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28) -#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28) +#define PIPE_CRC_SOURCE_MASK_SKL REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PLANE_1_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 0) +#define PIPE_CRC_SOURCE_PLANE_2_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 2) +#define PIPE_CRC_SOURCE_DMUX_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 4) +#define PIPE_CRC_SOURCE_PLANE_3_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 6) +#define PIPE_CRC_SOURCE_PLANE_4_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 7) +#define PIPE_CRC_SOURCE_PLANE_5_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 5) +#define PIPE_CRC_SOURCE_PLANE_6_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 3) +#define PIPE_CRC_SOURCE_PLANE_7_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 1) /* ivb+ source selection */ -#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) -#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) -#define PIPE_CRC_SOURCE_PF_IVB (2 << 29) +#define PIPE_CRC_SOURCE_MASK_IVB REG_GENMASK(30, 29) +#define PIPE_CRC_SOURCE_PRIMARY_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 0) +#define PIPE_CRC_SOURCE_SPRITE_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 1) +#define PIPE_CRC_SOURCE_PF_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 2) /* ilk+ source selection */ -#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) -#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) -#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) -/* embedded DP port on the north display block, reserved on ivb */ -#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) -#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ +#define PIPE_CRC_SOURCE_MASK_ILK REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PRIMARY_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 0) +#define PIPE_CRC_SOURCE_SPRITE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 1) +#define PIPE_CRC_SOURCE_PIPE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 2) +/* embedded DP port on the north display block */ +#define PIPE_CRC_SOURCE_PORT_A_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 4) +#define PIPE_CRC_SOURCE_FDI_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 5) /* vlv source selection */ -#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27) -#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27) -#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27) +#define PIPE_CRC_SOURCE_MASK_VLV REG_GENMASK(30, 27) +#define PIPE_CRC_SOURCE_PIPE_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 0) +#define PIPE_CRC_SOURCE_HDMIB_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 1) +#define PIPE_CRC_SOURCE_HDMIC_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 2) /* with DP port the pipe source is invalid */ -#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27) -#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27) -#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27) +#define PIPE_CRC_SOURCE_DP_D_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 3) +#define PIPE_CRC_SOURCE_DP_B_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 6) +#define PIPE_CRC_SOURCE_DP_C_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 7) /* gen3+ source selection */ -#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28) -#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28) -#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28) +#define PIPE_CRC_SOURCE_MASK_I9XX REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PIPE_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 0) +#define PIPE_CRC_SOURCE_SDVOB_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 1) +#define PIPE_CRC_SOURCE_SDVOC_I9XX 
REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 2) /* with DP/TV port the pipe source is invalid */ -#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28) -#define PIPE_CRC_SOURCE_TV_PRE (4 << 28) -#define PIPE_CRC_SOURCE_TV_POST (5 << 28) -#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28) -#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28) +#define PIPE_CRC_SOURCE_DP_D_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 3) +#define PIPE_CRC_SOURCE_TV_PRE REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 4) +#define PIPE_CRC_SOURCE_TV_POST REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 5) +#define PIPE_CRC_SOURCE_DP_B_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 6) +#define PIPE_CRC_SOURCE_DP_C_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 7) /* gen2 doesn't have source selection bits */ -#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30) +#define PIPE_CRC_INCLUDE_BORDER_I8XX REG_BIT(30) #define _PIPE_CRC_RES_1_A_IVB 0x60064 #define _PIPE_CRC_RES_2_A_IVB 0x60068 @@ -4509,11 +4636,9 @@ enum { * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one * instance of it */ -#define _HSW_EDP_PSR_BASE 0x64800 #define _SRD_CTL_A 0x60800 #define _SRD_CTL_EDP 0x6f800 -#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust) -#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A)) +#define EDP_PSR_CTL(tran) _MMIO(_TRANS2(tran, _SRD_CTL_A)) #define EDP_PSR_ENABLE (1 << 31) #define BDW_PSR_SINGLE_FRAME (1 << 30) #define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ @@ -4557,22 +4682,13 @@ enum { #define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans)) #define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans)) -#define _SRD_AUX_CTL_A 0x60810 -#define _SRD_AUX_CTL_EDP 0x6f810 -#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A)) -#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) -#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20) -#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16) -#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11) -#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff) - #define _SRD_AUX_DATA_A 0x60814 #define _SRD_AUX_DATA_EDP 0x6f814 -#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */ +#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_TRANS2(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */ #define _SRD_STATUS_A 0x60840 #define _SRD_STATUS_EDP 0x6f840 -#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A)) +#define EDP_PSR_STATUS(tran) _MMIO(_TRANS2(tran, _SRD_STATUS_A)) #define EDP_PSR_STATUS_STATE_MASK (7 << 29) #define EDP_PSR_STATUS_STATE_SHIFT 29 #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) @@ -4599,13 +4715,13 @@ enum { #define _SRD_PERF_CNT_A 0x60844 #define _SRD_PERF_CNT_EDP 0x6f844 -#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A)) +#define EDP_PSR_PERF_CNT(tran) _MMIO(_TRANS2(tran, _SRD_PERF_CNT_A)) #define EDP_PSR_PERF_CNT_MASK 0xffffff /* PSR_MASK on SKL+ */ #define _SRD_DEBUG_A 0x60860 #define _SRD_DEBUG_EDP 0x6f860 -#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A)) +#define EDP_PSR_DEBUG(tran) _MMIO(_TRANS2(tran, _SRD_DEBUG_A)) #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) @@ -4672,11 +4788,11 @@ enum { #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) #define PSR_EVENT_PSR_DISABLE (1 << 0) -#define _PSR2_STATUS_A 0x60940 -#define _PSR2_STATUS_EDP 0x6f940 -#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) -#define 
EDP_PSR2_STATUS_STATE_MASK (0xf << 28) -#define EDP_PSR2_STATUS_STATE_SHIFT 28 +#define _PSR2_STATUS_A 0x60940 +#define _PSR2_STATUS_EDP 0x6f940 +#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) +#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28) +#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8) #define _PSR2_SU_STATUS_A 0x60914 #define _PSR2_SU_STATUS_EDP 0x6f914 @@ -4973,9 +5089,9 @@ enum { #define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154) #define DC_BALANCE_RESET_VLV (1 << 31) #define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) -#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ -#define PIPE_B_SCRAMBLE_RESET (1 << 1) -#define PIPE_A_SCRAMBLE_RESET (1 << 0) +#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */ +#define PIPE_B_SCRAMBLE_RESET REG_BIT(1) +#define PIPE_A_SCRAMBLE_RESET REG_BIT(0) /* Gen 3 SDVO bits: */ #define SDVO_ENABLE (1 << 31) @@ -6240,55 +6356,55 @@ enum { #define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26) #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) -#define PIPEB_LINE_COMPARE_INT_EN (1 << 29) -#define PIPEB_HLINE_INT_EN (1 << 28) -#define PIPEB_VBLANK_INT_EN (1 << 27) -#define SPRITED_FLIP_DONE_INT_EN (1 << 26) -#define SPRITEC_FLIP_DONE_INT_EN (1 << 25) -#define PLANEB_FLIP_DONE_INT_EN (1 << 24) -#define PIPE_PSR_INT_EN (1 << 22) -#define PIPEA_LINE_COMPARE_INT_EN (1 << 21) -#define PIPEA_HLINE_INT_EN (1 << 20) -#define PIPEA_VBLANK_INT_EN (1 << 19) -#define SPRITEB_FLIP_DONE_INT_EN (1 << 18) -#define SPRITEA_FLIP_DONE_INT_EN (1 << 17) -#define PLANEA_FLIPDONE_INT_EN (1 << 16) -#define PIPEC_LINE_COMPARE_INT_EN (1 << 13) -#define PIPEC_HLINE_INT_EN (1 << 12) -#define PIPEC_VBLANK_INT_EN (1 << 11) -#define SPRITEF_FLIPDONE_INT_EN (1 << 10) -#define SPRITEE_FLIPDONE_INT_EN (1 << 9) -#define PLANEC_FLIPDONE_INT_EN (1 << 8) +#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29) +#define PIPEB_HLINE_INT_EN REG_BIT(28) +#define PIPEB_VBLANK_INT_EN REG_BIT(27) +#define SPRITED_FLIP_DONE_INT_EN REG_BIT(26) +#define SPRITEC_FLIP_DONE_INT_EN REG_BIT(25) +#define PLANEB_FLIP_DONE_INT_EN REG_BIT(24) +#define PIPE_PSR_INT_EN REG_BIT(22) +#define PIPEA_LINE_COMPARE_INT_EN REG_BIT(21) +#define PIPEA_HLINE_INT_EN REG_BIT(20) +#define PIPEA_VBLANK_INT_EN REG_BIT(19) +#define SPRITEB_FLIP_DONE_INT_EN REG_BIT(18) +#define SPRITEA_FLIP_DONE_INT_EN REG_BIT(17) +#define PLANEA_FLIPDONE_INT_EN REG_BIT(16) +#define PIPEC_LINE_COMPARE_INT_EN REG_BIT(13) +#define PIPEC_HLINE_INT_EN REG_BIT(12) +#define PIPEC_VBLANK_INT_EN REG_BIT(11) +#define SPRITEF_FLIPDONE_INT_EN REG_BIT(10) +#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9) +#define PLANEC_FLIPDONE_INT_EN REG_BIT(8) #define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ -#define SPRITEF_INVALID_GTT_INT_EN (1 << 27) -#define SPRITEE_INVALID_GTT_INT_EN (1 << 26) -#define PLANEC_INVALID_GTT_INT_EN (1 << 25) -#define CURSORC_INVALID_GTT_INT_EN (1 << 24) -#define CURSORB_INVALID_GTT_INT_EN (1 << 23) -#define CURSORA_INVALID_GTT_INT_EN (1 << 22) -#define SPRITED_INVALID_GTT_INT_EN (1 << 21) -#define SPRITEC_INVALID_GTT_INT_EN (1 << 20) -#define PLANEB_INVALID_GTT_INT_EN (1 << 19) -#define SPRITEB_INVALID_GTT_INT_EN (1 << 18) -#define SPRITEA_INVALID_GTT_INT_EN (1 << 17) -#define PLANEA_INVALID_GTT_INT_EN (1 << 16) -#define DPINVGTT_EN_MASK 0xff0000 -#define DPINVGTT_EN_MASK_CHV 0xfff0000 -#define SPRITEF_INVALID_GTT_STATUS (1 << 11) -#define SPRITEE_INVALID_GTT_STATUS (1 << 10) -#define PLANEC_INVALID_GTT_STATUS (1 << 9) -#define 
CURSORC_INVALID_GTT_STATUS (1 << 8) -#define CURSORB_INVALID_GTT_STATUS (1 << 7) -#define CURSORA_INVALID_GTT_STATUS (1 << 6) -#define SPRITED_INVALID_GTT_STATUS (1 << 5) -#define SPRITEC_INVALID_GTT_STATUS (1 << 4) -#define PLANEB_INVALID_GTT_STATUS (1 << 3) -#define SPRITEB_INVALID_GTT_STATUS (1 << 2) -#define SPRITEA_INVALID_GTT_STATUS (1 << 1) -#define PLANEA_INVALID_GTT_STATUS (1 << 0) -#define DPINVGTT_STATUS_MASK 0xff -#define DPINVGTT_STATUS_MASK_CHV 0xfff +#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16) +#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16) +#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27) +#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26) +#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25) +#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24) +#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23) +#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22) +#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21) +#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20) +#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19) +#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18) +#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17) +#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16) +#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0) +#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0) +#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11) +#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10) +#define PLANEC_INVALID_GTT_STATUS REG_BIT(9) +#define CURSORC_INVALID_GTT_STATUS REG_BIT(8) +#define CURSORB_INVALID_GTT_STATUS REG_BIT(7) +#define CURSORA_INVALID_GTT_STATUS REG_BIT(6) +#define SPRITED_INVALID_GTT_STATUS REG_BIT(5) +#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4) +#define PLANEB_INVALID_GTT_STATUS REG_BIT(3) +#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2) +#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1) +#define PLANEA_INVALID_GTT_STATUS REG_BIT(0) #define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030) #define DSPARB_CSTART_MASK (0x7f << 7) @@ -6851,7 +6967,7 @@ enum { #define DVS_SOURCE_KEY (1 << 22) #define DVS_RGB_ORDER_XBGR (1 << 20) #define DVS_YUV_FORMAT_BT709 (1 << 18) -#define DVS_YUV_BYTE_ORDER_MASK (3 << 16) +#define DVS_YUV_ORDER_MASK (3 << 16) #define DVS_YUV_ORDER_YUYV (0 << 16) #define DVS_YUV_ORDER_UYVY (1 << 16) #define DVS_YUV_ORDER_YVYU (2 << 16) @@ -6930,7 +7046,7 @@ enum { #define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */ #define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19) #define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */ -#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16) +#define SPRITE_YUV_ORDER_MASK (3 << 16) #define SPRITE_YUV_ORDER_YUYV (0 << 16) #define SPRITE_YUV_ORDER_UYVY (1 << 16) #define SPRITE_YUV_ORDER_YVYU (2 << 16) @@ -7015,7 +7131,7 @@ enum { #define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */ #define SP_SOURCE_KEY (1 << 22) #define SP_YUV_FORMAT_BT709 (1 << 18) -#define SP_YUV_BYTE_ORDER_MASK (3 << 16) +#define SP_YUV_ORDER_MASK (3 << 16) #define SP_YUV_ORDER_YUYV (0 << 16) #define SP_YUV_ORDER_UYVY (1 << 16) #define SP_YUV_ORDER_YVYU (2 << 16) @@ -7156,10 +7272,10 @@ enum { #define PLANE_CTL_YUV420_Y_PLANE (1 << 19) #define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) #define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) -#define PLANE_CTL_YUV422_YUYV (0 << 16) -#define PLANE_CTL_YUV422_UYVY (1 << 16) -#define PLANE_CTL_YUV422_YVYU (2 << 16) -#define PLANE_CTL_YUV422_VYUY (3 << 16) +#define PLANE_CTL_YUV422_ORDER_YUYV (0 << 16) +#define PLANE_CTL_YUV422_ORDER_UYVY (1 << 16) +#define PLANE_CTL_YUV422_ORDER_YVYU (2 << 16) +#define PLANE_CTL_YUV422_ORDER_VYUY (3 << 16) #define 
PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) #define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */ @@ -7213,10 +7329,10 @@ enum { #define _PLANE_CUS_CTL_1_A 0x701c8 #define _PLANE_CUS_CTL_2_A 0x702c8 #define PLANE_CUS_ENABLE (1 << 31) -#define PLANE_CUS_PLANE_4_RKL (0 << 30) -#define PLANE_CUS_PLANE_5_RKL (1 << 30) -#define PLANE_CUS_PLANE_6 (0 << 30) -#define PLANE_CUS_PLANE_7 (1 << 30) +#define PLANE_CUS_Y_PLANE_4_RKL (0 << 30) +#define PLANE_CUS_Y_PLANE_5_RKL (1 << 30) +#define PLANE_CUS_Y_PLANE_6_ICL (0 << 30) +#define PLANE_CUS_Y_PLANE_7_ICL (1 << 30) #define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19) #define PLANE_CUS_HPHASE_0 (0 << 16) #define PLANE_CUS_HPHASE_0_25 (1 << 16) @@ -7230,6 +7346,7 @@ enum { #define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ #define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ #define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) +#define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */ #define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */ #define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ #define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) @@ -7247,12 +7364,12 @@ enum { #define _PLANE_NV12_BUF_CFG_1_A 0x70278 #define _PLANE_NV12_BUF_CFG_2_A 0x70378 -#define _PLANE_CC_VAL_1_B 0x711b4 -#define _PLANE_CC_VAL_2_B 0x712b4 -#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) -#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) -#define PLANE_CC_VAL(pipe, plane) \ - _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe)) +#define _PLANE_CC_VAL_1_B 0x711b4 +#define _PLANE_CC_VAL_2_B 0x712b4 +#define _PLANE_CC_VAL_1(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) + (dw) * 4) +#define _PLANE_CC_VAL_2(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) + (dw) * 4) +#define PLANE_CC_VAL(pipe, plane, dw) \ + _MMIO_PLANE((plane), _PLANE_CC_VAL_1((pipe), (dw)), _PLANE_CC_VAL_2((pipe), (dw))) /* Input CSC Register Definitions */ #define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0 @@ -7353,6 +7470,7 @@ enum { #define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B) #define PLANE_SURF(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe)) +#define PLANE_SURF_DECRYPT REG_BIT(2) #define _PLANE_OFFSET_1_B 0x711a4 #define _PLANE_OFFSET_2_B 0x712a4 @@ -8094,6 +8212,7 @@ enum { /* irq instances for OTHER_CLASS */ #define OTHER_GUC_INSTANCE 0 #define OTHER_GTPM_INSTANCE 1 +#define OTHER_KCR_INSTANCE 4 #define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4)) @@ -8176,8 +8295,9 @@ enum { #define GLK_CL0_PWR_DOWN (1 << 10) #define CHICKEN_MISC_4 _MMIO(0x4208c) -#define FBC_STRIDE_OVERRIDE (1 << 13) -#define FBC_STRIDE_MASK 0x1FFF +#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13) +#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0) +#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x)) #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 @@ -8193,6 +8313,11 @@ enum { #define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3) #define HSW_FBCQ_DIS (1 << 22) #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) +#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0) +#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0) +#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1) +#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2) +#define SKL_PLANE1_STRETCH_MAX_X1 
REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3) #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) #define _CHICKEN_TRANS_A 0x420c0 @@ -8211,6 +8336,7 @@ enum { #define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */ #define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23) #define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19) +#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18) #define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18) #define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */ #define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */ @@ -8227,7 +8353,7 @@ enum { /* * The below are numbered starting from "S1" on gen11/gen12, but starting - * with gen13 display, the bspec switches to a 0-based numbering scheme + * with display 13, the bspec switches to a 0-based numbering scheme * (although the addresses stay the same so new S0 = old S1, new S1 = old S2). * We'll just use the 0-based numbering here for all platforms since it's the * way things will be named by the hardware team going forward, plus it's more @@ -8272,9 +8398,10 @@ enum { #define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) -#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30) -#define ICL_DELAY_PMRSP (1 << 22) -#define MASK_WAKEMEM (1 << 13) +#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) +#define ICL_DELAY_PMRSP REG_BIT(22) +#define DISABLE_FLR_SRC REG_BIT(15) +#define MASK_WAKEMEM REG_BIT(13) #define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434) #define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27) @@ -8315,6 +8442,9 @@ enum { #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11) +#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC) +#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0) + #define GEN8_CS_CHICKEN1 _MMIO(0x2580) #define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0) #define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) @@ -8338,9 +8468,10 @@ enum { #define GEN8_ERRDETBCTRL (1 << 9) #define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) - #define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) - #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) - #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) +#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) +#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12) +#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) +#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) #define HIZ_CHICKEN _MMIO(0x7018) # define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15) @@ -8394,6 +8525,12 @@ enum { #define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21) #define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22) +#define GEN11_L3SQCREG5 _MMIO(0xb158) +#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) + +#define XEHP_L3SCQREG7 _MMIO(0xb188) +#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3) + /* GEN8 chicken */ #define HDC_CHICKEN0 _MMIO(0x7300) #define ICL_HDC_MODE _MMIO(0xE5F4) @@ -8404,6 +8541,12 @@ enum { #define HDC_FORCE_NON_COHERENT (1 << 4) #define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10) +#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0) +#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11) + +#define SARB_CHICKEN1 _MMIO(0xe90c) +#define COMP_CKN_IN REG_GENMASK(30, 29) + #define GEN8_HDC_CHICKEN1 _MMIO(0x7304) /* GEN9 chicken */ @@ -8431,8 +8574,13 @@ enum { _PIPEB_CHICKEN) #define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30) #define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30) -#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15) 
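CHICKEN_PIPESL_1(pipe) above, like most per-pipe registers in this file, names only the pipe A and pipe B instances and derives the rest: the driver's _PIPE() helper extrapolates linearly from the first two addresses. A sketch of that addressing scheme follows; pipe_reg() is a hypothetical stand-in for illustration, not a driver function:

#include <stdio.h>

/* instance 0 maps to address a, instance 1 to b, and later instances are
 * assumed to be equally spaced -- the same arithmetic as _PIPE() */
static unsigned int pipe_reg(unsigned int pipe, unsigned int a, unsigned int b)
{
	return a + pipe * (b - a);
}

int main(void)
{
	/* _CHICKEN_PIPESL_1_A = 0x420b0, _CHICKEN_PIPESL_1_B = 0x420b4 */
	printf("CHICKEN_PIPESL_1(PIPE_C) = 0x%x\n",
	       pipe_reg(2, 0x420b0, 0x420b4)); /* 0x420b8 */
	return 0;
}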
-#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) +#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15) +#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12) +#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7) + +#define VFLSKPD _MMIO(0x62a8) +#define DIS_OVER_FETCH_CACHE REG_BIT(1) +#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0) #define FF_MODE2 _MMIO(0x6604) #define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24) @@ -9096,6 +9244,29 @@ enum { #define TRANS_DP_HSYNC_ACTIVE_LOW 0 #define TRANS_DP_SYNC_MASK (3 << 3) +#define _TRANS_DP2_CTL_A 0x600a0 +#define _TRANS_DP2_CTL_B 0x610a0 +#define _TRANS_DP2_CTL_C 0x620a0 +#define _TRANS_DP2_CTL_D 0x630a0 +#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B) +#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31) +#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30) +#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23) + +#define _TRANS_DP2_VFREQHIGH_A 0x600a4 +#define _TRANS_DP2_VFREQHIGH_B 0x610a4 +#define _TRANS_DP2_VFREQHIGH_C 0x620a4 +#define _TRANS_DP2_VFREQHIGH_D 0x630a4 +#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B) +#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8) +#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz)) + +#define _TRANS_DP2_VFREQLOW_A 0x600a8 +#define _TRANS_DP2_VFREQLOW_B 0x610a8 +#define _TRANS_DP2_VFREQLOW_C 0x620a8 +#define _TRANS_DP2_VFREQLOW_D 0x630a8 +#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B) + /* SNB eDP training params */ /* SNB A-stepping */ #define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22) @@ -9234,6 +9405,9 @@ enum { #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14) #define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28) +#define UNSLCGCTL9430 _MMIO(0x9430) +#define MSQDUNIT_CLKGATE_DIS REG_BIT(3) + #define GEN6_GFXPAUSE _MMIO(0xA000) #define GEN6_RPNSWREQ _MMIO(0xA008) #define GEN6_TURBO_DISABLE (1 << 31) @@ -9549,24 +9723,39 @@ enum { #define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) #define GEN8_ROW_CHICKEN _MMIO(0xe4f0) -#define FLOW_CONTROL_ENABLE (1 << 15) -#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8) -#define STALL_DOP_GATING_DISABLE (1 << 5) -#define THROTTLE_12_5 (7 << 2) -#define DISABLE_EARLY_EOT (1 << 1) +#define FLOW_CONTROL_ENABLE REG_BIT(15) +#define UGM_BACKUP_MODE REG_BIT(13) +#define MDQ_ARBITRATION_MODE REG_BIT(12) +#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8) +#define STALL_DOP_GATING_DISABLE REG_BIT(5) +#define THROTTLE_12_5 REG_GENMASK(4, 2) +#define DISABLE_EARLY_EOT REG_BIT(1) #define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) +#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15) #define GEN12_DISABLE_EARLY_READ REG_BIT(14) +#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12) #define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8) +#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8) +#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15) +#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4) +#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32) +#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32) +#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32) +#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32) +#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32) + #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1 << 0) #define PUSH_CONSTANT_DEREF_DISABLE (1 << 8) #define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1) -#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) -#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) -#define 
GEN11_DIS_PICK_2ND_EU REG_BIT(7) +#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) +#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13) +#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) +#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) +#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4) #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) @@ -9581,9 +9770,10 @@ enum { #define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1) #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) -#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8) -#define GEN9_ENABLE_YV12_BUGFIX (1 << 4) -#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2) +#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8) +#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4) +#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2) /* Audio */ #define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020) @@ -9710,6 +9900,11 @@ enum { #define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4)) #define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4)) +#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc +#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc +#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL) +#define AUD_ENABLE_SDP_SPLIT REG_BIT(31) + #define HSW_AUD_CHICKENBIT _MMIO(0x65f10) #define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15) @@ -9717,6 +9912,10 @@ enum { #define AUD_PIN_BUF_CTL _MMIO(0x48414) #define AUD_PIN_BUF_ENABLE REG_BIT(31) +#define AUD_TS_CDCLK_M _MMIO(0x65ea0) +#define AUD_TS_CDCLK_M_EN REG_BIT(31) +#define AUD_TS_CDCLK_N _MMIO(0x65ea4) + /* Display Audio Config Reg */ #define AUD_CONFIG_BE _MMIO(0x65ef0) #define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe))) @@ -10148,14 +10347,12 @@ enum skl_power_gate { #define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT) #define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT) #define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT) -#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT) -#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1) #define TRANS_DDI_MODE_SELECT_MASK (7 << 24) #define TRANS_DDI_MODE_SELECT_HDMI (0 << 24) #define TRANS_DDI_MODE_SELECT_DVI (1 << 24) #define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24) #define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24) -#define TRANS_DDI_MODE_SELECT_FDI (4 << 24) +#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24) #define TRANS_DDI_BPC_MASK (7 << 20) #define TRANS_DDI_BPC_8 (0 << 20) #define TRANS_DDI_BPC_10 (1 << 20) @@ -10459,6 +10656,14 @@ enum skl_power_gate { #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) +/* CDCLK_SQUASH_CTL */ +#define CDCLK_SQUASH_CTL _MMIO(0x46008) +#define CDCLK_SQUASH_ENABLE REG_BIT(31) +#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24) +#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x)) +#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0) +#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x)) + /* LCPLL_CTL */ #define LCPLL1_CTL _MMIO(0x46010) #define LCPLL2_CTL _MMIO(0x46014) @@ -10958,7 +11163,6 @@ enum skl_power_gate { _DKL_TX_DPCNTL1) #define _DKL_TX_DPCNTL2 0x2C8 -#define DKL_TX_LOADGEN_SHARING_PMD_DISABLE REG_BIT(12) #define DKL_TX_DP20BITMODE (1 << 2) #define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \ _DKL_PHY1_BASE, \ @@ -11043,12 +11247,6 @@ enum 
skl_power_gate { #define DC_STATE_DEBUG_MASK_CORES (1 << 0) #define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1) -#define BXT_P_CR_MC_BIOS_REQ_0_0_0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114) -#define BXT_REQ_DATA_MASK 0x3F -#define BXT_DRAM_CHANNEL_ACTIVE_SHIFT 12 -#define BXT_DRAM_CHANNEL_ACTIVE_MASK (0xF << 12) -#define BXT_MEMORY_FREQ_MULTIPLIER_HZ 133333333 - #define BXT_D_CR_DRP0_DUNIT8 0x1000 #define BXT_D_CR_DRP0_DUNIT9 0x1200 #define BXT_D_CR_DRP0_DUNIT_START 8 @@ -11079,9 +11277,7 @@ enum skl_power_gate { #define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22) #define BXT_DRAM_TYPE_DDR4 (0x4 << 22) -#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666 #define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04) -#define SKL_REQ_DATA_MASK (0xF << 0) #define DG1_GEAR_TYPE REG_BIT(16) #define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000) @@ -11345,6 +11541,51 @@ enum skl_power_gate { _PAL_PREC_MULTI_SEG_DATA_A, \ _PAL_PREC_MULTI_SEG_DATA_B) +#define _MMIO_PLANE_GAMC(plane, i, a, b) _MMIO(_PIPE(plane, a, b) + (i) * 4) + +/* Plane CSC Registers */ +#define _PLANE_CSC_RY_GY_1_A 0x70210 +#define _PLANE_CSC_RY_GY_2_A 0x70310 + +#define _PLANE_CSC_RY_GY_1_B 0x71210 +#define _PLANE_CSC_RY_GY_2_B 0x71310 + +#define _PLANE_CSC_RY_GY_1(pipe) _PIPE(pipe, _PLANE_CSC_RY_GY_1_A, \ + _PLANE_CSC_RY_GY_1_B) +#define _PLANE_CSC_RY_GY_2(pipe) _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \ + _PLANE_INPUT_CSC_RY_GY_2_B) +#define PLANE_CSC_COEFF(pipe, plane, index) _MMIO_PLANE(plane, \ + _PLANE_CSC_RY_GY_1(pipe) + (index) * 4, \ + _PLANE_CSC_RY_GY_2(pipe) + (index) * 4) + +#define _PLANE_CSC_PREOFF_HI_1_A 0x70228 +#define _PLANE_CSC_PREOFF_HI_2_A 0x70328 + +#define _PLANE_CSC_PREOFF_HI_1_B 0x71228 +#define _PLANE_CSC_PREOFF_HI_2_B 0x71328 + +#define _PLANE_CSC_PREOFF_HI_1(pipe) _PIPE(pipe, _PLANE_CSC_PREOFF_HI_1_A, \ + _PLANE_CSC_PREOFF_HI_1_B) +#define _PLANE_CSC_PREOFF_HI_2(pipe) _PIPE(pipe, _PLANE_CSC_PREOFF_HI_2_A, \ + _PLANE_CSC_PREOFF_HI_2_B) +#define PLANE_CSC_PREOFF(pipe, plane, index) _MMIO_PLANE(plane, _PLANE_CSC_PREOFF_HI_1(pipe) + \ + (index) * 4, _PLANE_CSC_PREOFF_HI_2(pipe) + \ + (index) * 4) + +#define _PLANE_CSC_POSTOFF_HI_1_A 0x70234 +#define _PLANE_CSC_POSTOFF_HI_2_A 0x70334 + +#define _PLANE_CSC_POSTOFF_HI_1_B 0x71234 +#define _PLANE_CSC_POSTOFF_HI_2_B 0x71334 + +#define _PLANE_CSC_POSTOFF_HI_1(pipe) _PIPE(pipe, _PLANE_CSC_POSTOFF_HI_1_A, \ + _PLANE_CSC_POSTOFF_HI_1_B) +#define _PLANE_CSC_POSTOFF_HI_2(pipe) _PIPE(pipe, _PLANE_CSC_POSTOFF_HI_2_A, \ + _PLANE_CSC_POSTOFF_HI_2_B) +#define PLANE_CSC_POSTOFF(pipe, plane, index) _MMIO_PLANE(plane, _PLANE_CSC_POSTOFF_HI_1(pipe) + \ + (index) * 4, _PLANE_CSC_POSTOFF_HI_2(pipe) + \ + (index) * 4) + /* pipe CSC & degamma/gamma LUTs on CHV */ #define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900) #define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904) @@ -11611,6 +11852,16 @@ enum skl_power_gate { _ICL_DSI_IO_MODECTL_1) #define COMBO_PHY_MODE_DSI (1 << 0) +/* TGL DSI Chicken register */ +#define _TGL_DSI_CHKN_REG_0 0x6B0C0 +#define _TGL_DSI_CHKN_REG_1 0x6B8C0 +#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ + _TGL_DSI_CHKN_REG_0, \ + _TGL_DSI_CHKN_REG_1) +#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \ + (byte_clocks)) + /* Display Stream Splitter Control */ #define DSS_CTL1 _MMIO(0x67400) #define SPLITTER_ENABLE (1 << 31) @@ -12356,11 +12607,19 @@ enum skl_power_gate { #define PMFLUSH_GAPL3UNBLOCK (1 << 21) #define 
PMFLUSHDONE_LNEBLK (1 << 22) +#define XEHP_L3NODEARBCFG _MMIO(0xb0b4) +#define XEHP_LNESPARE REG_BIT(19) + #define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */ #define GEN12_GSMBASE _MMIO(0x108100) #define GEN12_DSMBASE _MMIO(0x1080C0) +#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014) +#define SGSI_SIDECLK_DIS REG_BIT(17) +#define SGGI_DIS REG_BIT(15) +#define SGR_DIS REG_BIT(13) + /* gamt regs */ #define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) #define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ @@ -12734,4 +12993,10 @@ enum skl_power_gate { #define CLKREQ_POLICY _MMIO(0x101038) #define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1) +#define CLKGATE_DIS_MISC _MMIO(0x46534) +#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21) + +#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C) +#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 3839712ebd23..ad175d662b4e 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -29,6 +29,7 @@ #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include "gem/i915_gem_context.h" #include "gt/intel_breadcrumbs.h" @@ -96,9 +97,9 @@ static signed long i915_fence_wait(struct dma_fence *fence, bool interruptible, signed long timeout) { - return i915_request_wait(to_request(fence), - interruptible | I915_WAIT_PRIORITY, - timeout); + return i915_request_wait_timeout(to_request(fence), + interruptible | I915_WAIT_PRIORITY, + timeout); } struct kmem_cache *i915_request_slab_cache(void) @@ -113,6 +114,10 @@ static void i915_fence_release(struct dma_fence *fence) GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && rq->guc_prio != GUC_PRIO_FINI); + i915_request_free_capture_list(fetch_and_zero(&rq->capture_list)); + if (i915_vma_snapshot_present(&rq->batch_snapshot)) + i915_vma_snapshot_put_onstack(&rq->batch_snapshot); + /* * The request is put onto a RCU freelist (i.e. the address * is immediately reused), mark the fences as being freed now. 
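The i915_fence_wait() change above is where the dma-fence and i915 wait conventions meet: a dma_fence_ops.wait() callback must return a positive value when the fence completed (the remaining jiffies, or 1 for a timeout == 0 poll of an already-signaled fence), while the historical i915_request_wait() contract returns 0 on success and reserves -ETIME for an unfinished request. A minimal sketch of a timeout == 0 poll under both conventions, following the kernel-doc added later in this patch; the example_* helpers are illustrative only and not part of the patch:

	/* dma-fence convention: returns 1 when already signaled, -ETIME when not. */
	static bool example_is_signaled_fence_style(struct i915_request *rq)
	{
		return i915_request_wait_timeout(rq, 0, 0) > 0;
	}

	/* Historical i915 convention: returns 0 on success, -ETIME when still busy. */
	static bool example_is_signaled_i915_style(struct i915_request *rq)
	{
		return i915_request_wait(rq, 0, 0) == 0;
	}

The i915_request_wait() wrapper added further down in this patch performs exactly this mapping from the dma-fence return convention back to the old one.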
@@ -186,19 +191,6 @@ void i915_request_notify_execute_cb_imm(struct i915_request *rq) __notify_execute_cb(rq, irq_work_imm); } -static void free_capture_list(struct i915_request *request) -{ - struct i915_capture_list *capture; - - capture = fetch_and_zero(&request->capture_list); - while (capture) { - struct i915_capture_list *next = capture->next; - - kfree(capture); - capture = next; - } -} - static void __i915_request_fill(struct i915_request *rq, u8 val) { void *vaddr = rq->ring->vaddr; @@ -303,6 +295,37 @@ static void __rq_cancel_watchdog(struct i915_request *rq) i915_request_put(rq); } +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +/** + * i915_request_free_capture_list - Free a capture list + * @capture: Pointer to the first list item or NULL + * + */ +void i915_request_free_capture_list(struct i915_capture_list *capture) +{ + while (capture) { + struct i915_capture_list *next = capture->next; + + i915_vma_snapshot_put(capture->vma_snapshot); + capture = next; + } +} + +#define assert_capture_list_is_null(_rq) GEM_BUG_ON((_rq)->capture_list) + +#define clear_capture_list(_rq) ((_rq)->capture_list = NULL) + +#else + +#define i915_request_free_capture_list(_a) do {} while (0) + +#define assert_capture_list_is_null(_a) do {} while (0) + +#define clear_capture_list(_rq) do {} while (0) + +#endif + bool i915_request_retire(struct i915_request *rq) { if (!__i915_request_is_complete(rq)) @@ -339,7 +362,7 @@ bool i915_request_retire(struct i915_request *rq) } if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) - atomic_dec(&rq->engine->gt->rps.num_waiters); + intel_rps_dec_waiters(&rq->engine->gt->rps); /* * We only loosely track inflight requests across preemption, @@ -359,7 +382,6 @@ bool i915_request_retire(struct i915_request *rq) intel_context_exit(rq->context); intel_context_unpin(rq->context); - free_capture_list(rq); i915_sched_node_fini(&rq->sched); i915_request_put(rq); @@ -719,7 +741,7 @@ void i915_request_cancel(struct i915_request *rq, int error) intel_context_cancel_request(rq->context, rq); } -static int __i915_sw_fence_call +static int submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_request *request = @@ -755,7 +777,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) return NOTIFY_DONE; } -static int __i915_sw_fence_call +static int semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); @@ -829,13 +851,18 @@ static void __i915_request_ctor(void *arg) i915_sw_fence_init(&rq->submit, submit_notify); i915_sw_fence_init(&rq->semaphore, semaphore_notify); - dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0); - - rq->capture_list = NULL; + clear_capture_list(rq); + rq->batch_snapshot.present = false; init_llist_head(&rq->execute_cb); } +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#define clear_batch_ptr(_rq) ((_rq)->batch = NULL) +#else +#define clear_batch_ptr(_a) do {} while (0) +#endif + struct i915_request * __i915_request_create(struct intel_context *ce, gfp_t gfp) { @@ -905,17 +932,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->ring = ce->ring; rq->execution_mask = ce->engine->mask; - kref_init(&rq->fence.refcount); - rq->fence.flags = 0; - rq->fence.error = 0; - INIT_LIST_HEAD(&rq->fence.cb_list); - ret = intel_timeline_get_seqno(tl, rq, &seqno); if (ret) goto err_free; - rq->fence.context = tl->fence_context; - rq->fence.seqno = seqno; + dma_fence_init(&rq->fence, 
&i915_fence_ops, &rq->lock, + tl->fence_context, seqno); RCU_INIT_POINTER(rq->timeline, tl); rq->hwsp_seqno = tl->hwsp_seqno; @@ -932,10 +954,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) i915_sched_node_reinit(&rq->sched); /* No zalloc, everything must be cleared after use */ - rq->batch = NULL; + clear_batch_ptr(rq); __rq_init_watchdog(rq); - GEM_BUG_ON(rq->capture_list); + assert_capture_list_is_null(rq); GEM_BUG_ON(!llist_empty(&rq->execute_cb)); + GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot)); /* * Reserve space in the ring buffer for all the commands required to @@ -1152,6 +1175,12 @@ __emit_semaphore_wait(struct i915_request *to, return 0; } +static bool +can_use_semaphore_wait(struct i915_request *to, struct i915_request *from) +{ + return to->engine->gt->ggtt == from->engine->gt->ggtt; +} + static int emit_semaphore_wait(struct i915_request *to, struct i915_request *from, @@ -1160,6 +1189,9 @@ emit_semaphore_wait(struct i915_request *to, const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask; struct i915_sw_fence *wait = &to->submit; + if (!can_use_semaphore_wait(to, from)) + goto await_fence; + if (!intel_context_use_semaphores(to->context)) goto await_fence; @@ -1263,7 +1295,8 @@ __i915_request_await_execution(struct i915_request *to, * immediate execution, and so we must wait until it reaches the * active slot. */ - if (intel_engine_has_semaphores(to->engine) && + if (can_use_semaphore_wait(to, from) && + intel_engine_has_semaphores(to->engine) && !i915_request_has_initial_breadcrumb(to)) { err = __emit_semaphore_wait(to, from, from->fence.seqno - 1); if (err < 0) @@ -1332,6 +1365,25 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) return err; } +static inline bool is_parallel_rq(struct i915_request *rq) +{ + return intel_context_is_parallel(rq->context); +} + +static inline struct intel_context *request_to_parent(struct i915_request *rq) +{ + return intel_context_to_parent(rq->context); +} + +static bool is_same_parallel_context(struct i915_request *to, + struct i915_request *from) +{ + if (is_parallel_rq(to)) + return request_to_parent(to) == request_to_parent(from); + + return false; +} + int i915_request_await_execution(struct i915_request *rq, struct dma_fence *fence) @@ -1363,11 +1415,14 @@ i915_request_await_execution(struct i915_request *rq, * want to run our callback in all cases. 
*/ - if (dma_fence_is_i915(fence)) + if (dma_fence_is_i915(fence)) { + if (is_same_parallel_context(rq, to_request(fence))) + continue; ret = __i915_request_await_execution(rq, to_request(fence)); - else + } else { ret = i915_request_await_external(rq, fence); + } if (ret < 0) return ret; } while (--nchild); @@ -1468,10 +1523,13 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) fence)) continue; - if (dma_fence_is_i915(fence)) + if (dma_fence_is_i915(fence)) { + if (is_same_parallel_context(rq, to_request(fence))) + continue; ret = i915_request_await_request(rq, to_request(fence)); - else + } else { ret = i915_request_await_external(rq, fence); + } if (ret < 0) return ret; @@ -1523,35 +1581,51 @@ i915_request_await_object(struct i915_request *to, } static struct i915_request * -__i915_request_add_to_timeline(struct i915_request *rq) +__i915_request_ensure_parallel_ordering(struct i915_request *rq, + struct intel_timeline *timeline) { - struct intel_timeline *timeline = i915_request_timeline(rq); struct i915_request *prev; - /* - * Dependency tracking and request ordering along the timeline - * is special cased so that we can eliminate redundant ordering - * operations while building the request (we know that the timeline - * itself is ordered, and here we guarantee it). - * - * As we know we will need to emit tracking along the timeline, - * we embed the hooks into our request struct -- at the cost of - * having to have specialised no-allocation interfaces (which will - * be beneficial elsewhere). - * - * A second benefit to open-coding i915_request_await_request is - * that we can apply a slight variant of the rules specialised - * for timelines that jump between engines (such as virtual engines). - * If we consider the case of virtual engine, we must emit a dma-fence - * to prevent scheduling of the second request until the first is - * complete (to maximise our greedy late load balancing) and this - * precludes optimising to use semaphores serialisation of a single - * timeline across engines. - */ + GEM_BUG_ON(!is_parallel_rq(rq)); + + prev = request_to_parent(rq)->parallel.last_rq; + if (prev) { + if (!__i915_request_is_complete(prev)) { + i915_sw_fence_await_sw_fence(&rq->submit, + &prev->submit, + &rq->submitq); + + if (rq->engine->sched_engine->schedule) + __i915_sched_node_add_dependency(&rq->sched, + &prev->sched, + &rq->dep, + 0); + } + i915_request_put(prev); + } + + request_to_parent(rq)->parallel.last_rq = i915_request_get(rq); + + return to_request(__i915_active_fence_set(&timeline->last_request, + &rq->fence)); +} + +static struct i915_request * +__i915_request_ensure_ordering(struct i915_request *rq, + struct intel_timeline *timeline) +{ + struct i915_request *prev; + + GEM_BUG_ON(is_parallel_rq(rq)); + prev = to_request(__i915_active_fence_set(&timeline->last_request, &rq->fence)); + if (prev && !__i915_request_is_complete(prev)) { bool uses_guc = intel_engine_uses_guc(rq->engine); + bool pow2 = is_power_of_2(READ_ONCE(prev->engine)->mask | + rq->engine->mask); + bool same_context = prev->context == rq->context; /* * The requests are supposed to be kept in order. However, @@ -1559,13 +1633,11 @@ __i915_request_add_to_timeline(struct i915_request *rq) * is used as a barrier for external modification to this * context. 
*/ - GEM_BUG_ON(prev->context == rq->context && + GEM_BUG_ON(same_context && i915_seqno_passed(prev->fence.seqno, rq->fence.seqno)); - if ((!uses_guc && - is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) || - (uses_guc && prev->context == rq->context)) + if ((same_context && uses_guc) || (!uses_guc && pow2)) i915_sw_fence_await_sw_fence(&rq->submit, &prev->submit, &rq->submitq); @@ -1580,6 +1652,50 @@ __i915_request_add_to_timeline(struct i915_request *rq) 0); } + return prev; +} + +static struct i915_request * +__i915_request_add_to_timeline(struct i915_request *rq) +{ + struct intel_timeline *timeline = i915_request_timeline(rq); + struct i915_request *prev; + + /* + * Dependency tracking and request ordering along the timeline + * is special cased so that we can eliminate redundant ordering + * operations while building the request (we know that the timeline + * itself is ordered, and here we guarantee it). + * + * As we know we will need to emit tracking along the timeline, + * we embed the hooks into our request struct -- at the cost of + * having to have specialised no-allocation interfaces (which will + * be beneficial elsewhere). + * + * A second benefit to open-coding i915_request_await_request is + * that we can apply a slight variant of the rules specialised + * for timelines that jump between engines (such as virtual engines). + * If we consider the case of virtual engine, we must emit a dma-fence + * to prevent scheduling of the second request until the first is + * complete (to maximise our greedy late load balancing) and this + * precludes optimising to use semaphores serialisation of a single + * timeline across engines. + * + * We do not order parallel submission requests on the timeline as each + * parallel submission context has its own timeline and the ordering + * rules for parallel requests are that they must be submitted in the + * order received from the execbuf IOCTL. So rather than using the + * timeline we store a pointer to the last request submitted in the + * relationship in the gem context and insert a submission fence + * between that request and the request passed into this function, or + * alternatively we use a completion fence if the gem context has a single + * timeline and this is the first submission of an execbuf IOCTL. + */ + if (likely(!is_parallel_rq(rq))) + prev = __i915_request_ensure_ordering(rq, timeline); + else + prev = __i915_request_ensure_parallel_ordering(rq, timeline); + /* * Make sure that no request gazumped us - if it was allocated after * our i915_request_alloc() and called __i915_request_add() before @@ -1771,23 +1887,27 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb) } /** - * i915_request_wait - wait until execution of request has finished + * i915_request_wait_timeout - wait until execution of request has finished * @rq: the request to wait upon * @flags: how to wait * @timeout: how long to wait in jiffies * - * i915_request_wait() waits for the request to be completed, for a + * i915_request_wait_timeout() waits for the request to be completed, for a * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an * unbounded wait). * * Returns the remaining time (in jiffies) if the request completed, which may - * be zero or -ETIME if the request is unfinished after the timeout expires. + * be zero if the request is unfinished after the timeout expires. + * If the timeout is 0, it will return 1 if the fence is signaled.
+ * * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is * pending before the request completes. + * + * NOTE: This function has the same wait semantics as dma-fence. */ -long i915_request_wait(struct i915_request *rq, - unsigned int flags, - long timeout) +long i915_request_wait_timeout(struct i915_request *rq, + unsigned int flags, + long timeout) { const int state = flags & I915_WAIT_INTERRUPTIBLE ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; @@ -1797,7 +1917,7 @@ long i915_request_wait(struct i915_request *rq, GEM_BUG_ON(timeout < 0); if (dma_fence_is_signaled(&rq->fence)) - return timeout; + return timeout ?: 1; if (!timeout) return -ETIME; @@ -1835,7 +1955,7 @@ long i915_request_wait(struct i915_request *rq, * completion. That requires having a good predictor for the request * duration, which we currently lack. */ - if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) && + if (CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT && __i915_spin_request(rq, state)) goto out; @@ -1906,6 +2026,39 @@ out: return timeout; } +/** + * i915_request_wait - wait until execution of request has finished + * @rq: the request to wait upon + * @flags: how to wait + * @timeout: how long to wait in jiffies + * + * i915_request_wait() waits for the request to be completed, for a + * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an + * unbounded wait). + * + * Returns the remaining time (in jiffies) if the request completed, which may + * be zero or -ETIME if the request is unfinished after the timeout expires. + * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is + * pending before the request completes. + * + * NOTE: This function behaves differently from dma-fence wait semantics for + * timeout = 0. It returns 0 on success, and -ETIME if not signaled. + */ +long i915_request_wait(struct i915_request *rq, + unsigned int flags, + long timeout) +{ + long ret = i915_request_wait_timeout(rq, flags, timeout); + + if (!ret) + return -ETIME; + + if (ret > 0 && !timeout) + return 0; + + return ret; +} + static int print_sched_attr(const struct i915_sched_attr *attr, char *buf, int x, int len) { diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 1bc1349ba3c2..0ed01979491b 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -40,6 +40,7 @@ #include "i915_scheduler.h" #include "i915_selftest.h" #include "i915_sw_fence.h" +#include "i915_vma_snapshot.h" #include <uapi/drm/i915_drm.h> @@ -48,11 +49,17 @@ struct drm_i915_gem_object; struct drm_printer; struct i915_request; +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct i915_capture_list { + struct i915_vma_snapshot *vma_snapshot; struct i915_capture_list *next; - struct i915_vma *vma; }; +void i915_request_free_capture_list(struct i915_capture_list *capture); +#else +#define i915_request_free_capture_list(_a) do {} while (0) +#endif + #define RQ_TRACE(rq, fmt, ...) do { \ const struct i915_request *rq__ = (rq); \ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \ @@ -139,6 +146,29 @@ enum { * the GPU. Here we track such boost requests on a per-request basis. */ I915_FENCE_FLAG_BOOST, + + /* + * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a + * parent-child relationship (parallel submission, multi-lrc) should + * trigger a submission to the GuC rather than just moving the context + * tail.
+ */ + I915_FENCE_FLAG_SUBMIT_PARALLEL, + + /* + * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a + * parent-child relationship (parallel submission, multi-lrc) that + * hit an error while generating requests in the execbuf IOCTL. + * Indicates this request should be skipped as another request in + * submission / relationship encountered an error. + */ + I915_FENCE_FLAG_SKIP_PARALLEL, + + /* + * I915_FENCE_FLAG_COMPOSITE - Indicates fence is part of a composite + * fence (dma_fence_array) that i915 generated for parallel submission. + */ + I915_FENCE_FLAG_COMPOSITE, }; /** @@ -218,6 +248,11 @@ struct i915_request { }; struct llist_head execute_cb; struct i915_sw_fence semaphore; + /** + * @submit_work: complete submit fence from an IRQ if needed for + * locking hierarchy reasons. + */ + struct irq_work submit_work; /* * A list of everyone we wait upon, and everyone who waits upon us. @@ -261,10 +296,12 @@ struct i915_request { /** Preallocate space in the ring for emitting the request */ u32 reserved_space; - /** Batch buffer related to this request if any (used for - * error state dump only). - */ - struct i915_vma *batch; + /** Batch buffer pointer for selftest internal use. */ + I915_SELFTEST_DECLARE(struct i915_vma *batch); + + struct i915_vma_snapshot batch_snapshot; + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) /** * Additional buffers requested by userspace to be captured upon * a GPU hang. The vma/obj on this list are protected by their @@ -272,6 +309,7 @@ struct i915_request { * on the active_list (of their final request). */ struct i915_capture_list *capture_list; +#endif /** Time at which this request was emitted, in jiffies. */ unsigned long emitted_jiffies; @@ -285,18 +323,23 @@ struct i915_request { struct hrtimer timer; } watchdog; - /* - * Requests may need to be stalled when using GuC submission waiting for - * certain GuC operations to complete. If that is the case, stalled - * requests are added to a per context list of stalled requests. The - * below list_head is the link in that list. + /** + * @guc_fence_link: Requests may need to be stalled when using GuC + * submission waiting for certain GuC operations to complete. If that is + * the case, stalled requests are added to a per context list of stalled + * requests. The below list_head is the link in that list. Protected by + * ce->guc_state.lock. */ struct list_head guc_fence_link; /** - * Priority level while the request is inflight. Differs from i915 - * scheduler priority. See comment above - * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. + * @guc_prio: Priority level while the request is in flight. Differs + * from i915 scheduler priority. See comment above + * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by + * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and + * GUC_PRIO_FINI) outside the GuC priority range are used to indicate + * if the priority has not been initialized yet or if no more updates + * are possible because the request has completed.
*/ #define GUC_PRIO_INIT 0xff #define GUC_PRIO_FINI 0xfe @@ -381,6 +424,11 @@ void i915_request_unsubmit(struct i915_request *request); void i915_request_cancel(struct i915_request *rq, int error); +long i915_request_wait_timeout(struct i915_request *rq, + unsigned int flags, + long timeout) + __attribute__((nonnull(1))); + long i915_request_wait(struct i915_request *rq, unsigned int flags, long timeout) diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index 4a6712dca838..41f2adb6a583 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -41,8 +41,32 @@ bool i915_sg_trim(struct sg_table *orig_st) return true; } +static void i915_refct_sgt_release(struct kref *ref) +{ + struct i915_refct_sgt *rsgt = + container_of(ref, typeof(*rsgt), kref); + + sg_free_table(&rsgt->table); + kfree(rsgt); +} + +static const struct i915_refct_sgt_ops rsgt_ops = { + .release = i915_refct_sgt_release +}; + +/** + * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops + * @rsgt: The struct i915_refct_sgt to initialize. + * @size: The size of the underlying memory buffer. + */ +void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size) +{ + __i915_refct_sgt_init(rsgt, size, &rsgt_ops); +} + /** - * i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node + * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct + * drm_mm_node * @node: The drm_mm_node. * @region_start: An offset to add to the dma addresses of the sg list. * @@ -50,25 +74,28 @@ bool i915_sg_trim(struct sg_table *orig_st) * taking a maximum segment length into account, splitting into segments * if necessary. * - * Return: A pointer to a kmalloced struct sg_table on success, negative + * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative * error code cast to an error pointer on failure. */ -struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, - u64 region_start) +struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, + u64 region_start) { const u64 max_segment = SZ_1G; /* Do we have a limit on this? */ u64 segment_pages = max_segment >> PAGE_SHIFT; u64 block_size, offset, prev_end; + struct i915_refct_sgt *rsgt; struct sg_table *st; struct scatterlist *sg; - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); + if (!rsgt) return ERR_PTR(-ENOMEM); + i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT); + st = &rsgt->table; if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages), GFP_KERNEL)) { - kfree(st); + i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); } @@ -104,11 +131,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, sg_mark_end(sg); i915_sg_trim(st); - return st; + return rsgt; } /** - * i915_sg_from_buddy_resource - Create an sg_table from a struct + * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct * i915_buddy_block list * @res: The struct i915_ttm_buddy_resource. * @region_start: An offset to add to the dma addresses of the sg list. * @@ -117,11 +144,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, * taking a maximum segment length into account, splitting into segments * if necessary. * - * Return: A pointer to a kmalloced struct sg_table on success, negative + * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative * error code cast to an error pointer on failure.
*/ -struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, - u64 region_start) +struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, + u64 region_start) { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); const u64 size = res->num_pages << PAGE_SHIFT; @@ -129,18 +156,21 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, struct i915_buddy_mm *mm = bman_res->mm; struct list_head *blocks = &bman_res->blocks; struct i915_buddy_block *block; + struct i915_refct_sgt *rsgt; struct scatterlist *sg; struct sg_table *st; resource_size_t prev_end; GEM_BUG_ON(list_empty(blocks)); - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); + if (!rsgt) return ERR_PTR(-ENOMEM); + i915_refct_sgt_init(rsgt, size); + st = &rsgt->table; if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) { - kfree(st); + i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); } @@ -181,7 +211,7 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, sg_mark_end(sg); i915_sg_trim(st); - return st; + return rsgt; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index b8bd5925b03f..12c6a1684081 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -144,10 +144,78 @@ static inline unsigned int i915_sg_segment_size(void) bool i915_sg_trim(struct sg_table *orig_st); -struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, - u64 region_start); +/** + * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt + */ +struct i915_refct_sgt_ops { + /** + * release() - Free the memory of the struct i915_refct_sgt + * @ref: struct kref that is embedded in the struct i915_refct_sgt + */ + void (*release)(struct kref *ref); }; + +/** + * struct i915_refct_sgt - A refcounted scatter-gather table + * @kref: struct kref for refcounting + * @table: struct sg_table holding the scatter-gather table itself. Note that + * @table->sgl = NULL can be used to determine whether a scatter-gather table + * is present or not. + * @size: The size in bytes of the underlying memory buffer + * @ops: The operations structure. + */ +struct i915_refct_sgt { + struct kref kref; + struct sg_table table; + size_t size; + const struct i915_refct_sgt_ops *ops; +}; + +/** + * i915_refct_sgt_put - Put a refcounted sg-table + * @rsgt: The struct i915_refct_sgt to put. + */ +static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt) +{ + if (rsgt) + kref_put(&rsgt->kref, rsgt->ops->release); +} + +/** + * i915_refct_sgt_get - Get a refcounted sg-table + * @rsgt: The struct i915_refct_sgt to get. + */ +static inline struct i915_refct_sgt * +i915_refct_sgt_get(struct i915_refct_sgt *rsgt) +{ + kref_get(&rsgt->kref); + return rsgt; +} + +/** + * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom + * operations structure + * @rsgt: The struct i915_refct_sgt to initialize. + * @size: Size in bytes of the underlying memory buffer. + * @ops: A customized operations structure in case the refcounted sg-list + * is embedded into another structure.
+ */ +static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt, + size_t size, + const struct i915_refct_sgt_ops *ops) +{ + kref_init(&rsgt->kref); + rsgt->table.sgl = NULL; + rsgt->size = size; + rsgt->ops = ops; +} + +void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size); + +struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, + u64 region_start); -struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, - u64 region_start); +struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, + u64 region_start); #endif diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index c589a681da77..2a74a9a1cafe 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -18,7 +18,9 @@ #define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG static DEFINE_SPINLOCK(i915_sw_fence_lock); +#endif #define WQ_FLAG_BITS \ BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags)) @@ -34,7 +36,7 @@ enum { static void *i915_sw_fence_debug_hint(void *addr) { - return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK); + return (void *)(((struct i915_sw_fence *)addr)->fn); } #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -126,10 +128,7 @@ static inline void debug_fence_assert(struct i915_sw_fence *fence) static int __i915_sw_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { - i915_sw_fence_notify_t fn; - - fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK); - return fn(fence, state); + return fence->fn(fence, state); } #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -242,10 +241,13 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, const char *name, struct lock_class_key *key) { - BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK); + BUG_ON(!fn); __init_waitqueue_head(&fence->wait, name, key); - fence->flags = (unsigned long)fn; + fence->fn = fn; +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG + fence->flags = 0; +#endif i915_sw_fence_reinit(fence); } @@ -257,7 +259,6 @@ void i915_sw_fence_reinit(struct i915_sw_fence *fence) atomic_set(&fence->pending, 1); fence->error = 0; - I915_SW_FENCE_BUG_ON(!fence->flags); I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head)); } @@ -279,6 +280,7 @@ static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, return 0; } +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, const struct i915_sw_fence * const signaler) { @@ -322,9 +324,6 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, unsigned long flags; bool err; - if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG)) - return false; - spin_lock_irqsave(&i915_sw_fence_lock, flags); err = __i915_sw_fence_check_if_after(fence, signaler); __i915_sw_fence_clear_checked_bit(fence); @@ -332,6 +331,13 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, return err; } +#else +static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, + const struct i915_sw_fence * const signaler) +{ + return false; +} +#endif static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, @@ -572,56 +578,25 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, unsigned long timeout, gfp_t gfp) { - struct dma_fence *excl; + struct dma_resv_iter cursor; + struct dma_fence *f; int ret = 0, 
pending; debug_fence_assert(fence); might_sleep_if(gfpflags_allow_blocking(gfp)); - if (write) { - struct dma_fence **shared; - unsigned int count, i; - - ret = dma_resv_get_fences(resv, &excl, &count, &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - if (shared[i]->ops == exclude) - continue; - - pending = i915_sw_fence_await_dma_fence(fence, - shared[i], - timeout, - gfp); - if (pending < 0) { - ret = pending; - break; - } - - ret |= pending; - } - - for (i = 0; i < count; i++) - dma_fence_put(shared[i]); - kfree(shared); - } else { - excl = dma_resv_get_excl_unlocked(resv); - } - - if (ret >= 0 && excl && excl->ops != exclude) { - pending = i915_sw_fence_await_dma_fence(fence, - excl, - timeout, + dma_resv_iter_begin(&cursor, resv, write); + dma_resv_for_each_fence_unlocked(&cursor, f) { + pending = i915_sw_fence_await_dma_fence(fence, f, timeout, gfp); - if (pending < 0) + if (pending < 0) { ret = pending; - else - ret |= pending; - } - - dma_fence_put(excl); + break; + } + ret |= pending; + } + dma_resv_iter_end(&cursor); return ret; } diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 30a863353ee6..a7c603bc1b01 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -17,26 +17,27 @@ struct completion; struct dma_resv; +struct i915_sw_fence; + +enum i915_sw_fence_notify { + FENCE_COMPLETE, + FENCE_FREE +}; + +typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *, + enum i915_sw_fence_notify state); struct i915_sw_fence { wait_queue_head_t wait; + i915_sw_fence_notify_t fn; +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG unsigned long flags; +#endif atomic_t pending; int error; }; #define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */ -#define I915_SW_FENCE_PRIVATE_BIT 1 /* available for use by owner */ -#define I915_SW_FENCE_MASK (~3) - -enum i915_sw_fence_notify { - FENCE_COMPLETE, - FENCE_FREE -}; - -typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *, - enum i915_sw_fence_notify state); -#define __i915_sw_fence_call __aligned(4) void __i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn, diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c index 5b33ef23d54c..d2e56b387993 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c @@ -23,7 +23,7 @@ static void fence_work(struct work_struct *work) dma_fence_put(&f->dma); } -static int __i915_sw_fence_call +static int fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct dma_fence_work *f = container_of(fence, typeof(*f), chain); diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index de0e224b56ce..23777d500cdf 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -5,6 +5,7 @@ #include <linux/vga_switcheroo.h> +#include "i915_driver.h" #include "i915_drv.h" #include "i915_switcheroo.h" @@ -24,12 +25,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(pdev, PCI_D0); - i915_resume_switcheroo(i915); + i915_driver_resume_switcheroo(i915); i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; } else { drm_info(&i915->drm, "switched off\n"); i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend_switcheroo(i915, pmm); + 
i915_driver_suspend_switcheroo(i915, pmm); i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; } } diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index cdf0e9c6fd73..59d441cedc75 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -37,7 +37,6 @@ #include "i915_drv.h" #include "i915_sysfs.h" #include "intel_pm.h" -#include "intel_sideband.h" static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { @@ -280,7 +279,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct intel_rps *rps = &i915->gt.rps; - return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq)); + return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps)); } static ssize_t gt_boost_freq_mhz_store(struct device *kdev, @@ -289,7 +288,6 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct intel_rps *rps = &dev_priv->gt.rps; - bool boost = false; ssize_t ret; u32 val; @@ -297,21 +295,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, if (ret) return ret; - /* Validate against (static) hardware limits */ - val = intel_freq_opcode(rps, val); - if (val < rps->min_freq || val > rps->max_freq) - return -EINVAL; - - mutex_lock(&rps->lock); - if (val != rps->boost_freq) { - rps->boost_freq = val; - boost = atomic_read(&rps->num_waiters); - } - mutex_unlock(&rps->lock); - if (boost) - schedule_work(&rps->work); + ret = intel_rps_set_boost_frequency(rps, val); - return count; + return ret ?: count; } static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 806ad688274b..37b5c9e9d260 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -1,4 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM i915 + #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _I915_TRACE_H_ @@ -8,511 +12,11 @@ #include <drm/drm_drv.h> -#include "display/intel_crtc.h" -#include "display/intel_display_types.h" #include "gt/intel_engine.h" #include "i915_drv.h" #include "i915_irq.h" -#undef TRACE_SYSTEM -#define TRACE_SYSTEM i915 -#define TRACE_INCLUDE_FILE i915_trace - -/* watermark/fifo updates */ - -TRACE_EVENT(intel_pipe_enable, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) - ), - TP_fast_assign( - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc *it__; - for_each_intel_crtc(&dev_priv->drm, it__) { - __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); - __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); - } - __entry->pipe = crtc->pipe; - ), - - TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(intel_pipe_disable, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(enum pipe, pipe) - ), - - TP_fast_assign( - struct drm_i915_private *dev_priv = 
to_i915(crtc->base.dev); - struct intel_crtc *it__; - for_each_intel_crtc(&dev_priv->drm, it__) { - __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); - __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); - } - __entry->pipe = crtc->pipe; - ), - - TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(intel_pipe_crc, - TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), - TP_ARGS(crtc, crcs), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __array(u32, crcs, 5) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline, - __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], - __entry->crcs[3], __entry->crcs[4]) -); - -TRACE_EVENT(intel_cpu_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - __entry->pipe = pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_pch_fifo_underrun, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder), - TP_ARGS(dev_priv, pch_transcoder), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - enum pipe pipe = pch_transcoder; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - __entry->pipe = pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pch transcoder %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), - __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_memory_cxsr, - TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new), - TP_ARGS(dev_priv, old, new), - - TP_STRUCT__entry( - __array(u32, frame, 3) - __array(u32, scanline, 3) - __field(bool, old) - __field(bool, new) - ), - - TP_fast_assign( - struct intel_crtc *crtc; - for_each_intel_crtc(&dev_priv->drm, crtc) { - __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); - __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); - } - __entry->old = old; - __entry->new = new; - ), - - TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", - onoff(__entry->old), onoff(__entry->new), - __entry->frame[PIPE_A], __entry->scanline[PIPE_A], - __entry->frame[PIPE_B], __entry->scanline[PIPE_B], - __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) -); - -TRACE_EVENT(g4x_wm, - TP_PROTO(struct intel_crtc *crtc, const struct g4x_wm_values *wm), - TP_ARGS(crtc, wm), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, 
scanline) - __field(u16, primary) - __field(u16, sprite) - __field(u16, cursor) - __field(u16, sr_plane) - __field(u16, sr_cursor) - __field(u16, sr_fbc) - __field(u16, hpll_plane) - __field(u16, hpll_cursor) - __field(u16, hpll_fbc) - __field(bool, cxsr) - __field(bool, hpll) - __field(bool, fbc) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; - __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; - __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; - __entry->sr_plane = wm->sr.plane; - __entry->sr_cursor = wm->sr.cursor; - __entry->sr_fbc = wm->sr.fbc; - __entry->hpll_plane = wm->hpll.plane; - __entry->hpll_cursor = wm->hpll.cursor; - __entry->hpll_fbc = wm->hpll.fbc; - __entry->cxsr = wm->cxsr; - __entry->hpll = wm->hpll_en; - __entry->fbc = wm->fbc_en; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline, - __entry->primary, __entry->sprite, __entry->cursor, - yesno(__entry->cxsr), __entry->sr_plane, __entry->sr_cursor, __entry->sr_fbc, - yesno(__entry->hpll), __entry->hpll_plane, __entry->hpll_cursor, __entry->hpll_fbc, - yesno(__entry->fbc)) -); - -TRACE_EVENT(vlv_wm, - TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm), - TP_ARGS(crtc, wm), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, level) - __field(u32, cxsr) - __field(u32, primary) - __field(u32, sprite0) - __field(u32, sprite1) - __field(u32, cursor) - __field(u32, sr_plane) - __field(u32, sr_cursor) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->level = wm->level; - __entry->cxsr = wm->cxsr; - __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; - __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; - __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1]; - __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR]; - __entry->sr_plane = wm->sr.plane; - __entry->sr_cursor = wm->sr.cursor; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->level, __entry->cxsr, - __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor, - __entry->sr_plane, __entry->sr_cursor) -); - -TRACE_EVENT(vlv_fifo_size, - TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size), - TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, sprite0_start) - __field(u32, sprite1_start) - __field(u32, fifo_size) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->sprite0_start = sprite0_start; - __entry->sprite1_start = sprite1_start; - __entry->fifo_size = fifo_size; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->sprite0_start, - __entry->sprite1_start, __entry->fifo_size) -); - -/* plane updates */ - -TRACE_EVENT(intel_update_plane, - 
TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), - TP_ARGS(plane, crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __array(int, src, 4) - __array(int, dst, 4) - __string(name, plane->name) - ), - - TP_fast_assign( - __assign_str(name, plane->name); - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); - memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); - ), - - TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - pipe_name(__entry->pipe), __get_str(name), - __entry->frame, __entry->scanline, - DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), - DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) -); - -TRACE_EVENT(intel_disable_plane, - TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), - TP_ARGS(plane, crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __string(name, plane->name) - ), - - TP_fast_assign( - __assign_str(name, plane->name); - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __get_str(name), - __entry->frame, __entry->scanline) -); - -/* fbc */ - -TRACE_EVENT(intel_fbc_activate, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_fbc_deactivate, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -TRACE_EVENT(intel_fbc_nuke, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, __entry->scanline) -); - -/* pipe updates */ - -TRACE_EVENT(intel_pipe_update_start, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, min) - __field(u32, max) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = intel_crtc_get_vblank_counter(crtc); - __entry->scanline = intel_get_crtc_scanline(crtc); - __entry->min = crtc->debug.min_vbl; - __entry->max = crtc->debug.max_vbl; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->min, __entry->max) 
-); - -TRACE_EVENT(intel_pipe_update_vblank_evaded, - TP_PROTO(struct intel_crtc *crtc), - TP_ARGS(crtc), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - __field(u32, min) - __field(u32, max) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = crtc->debug.start_vbl_count; - __entry->scanline = crtc->debug.scanline_start; - __entry->min = crtc->debug.min_vbl; - __entry->max = crtc->debug.max_vbl; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline, __entry->min, __entry->max) -); - -TRACE_EVENT(intel_pipe_update_end, - TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end), - TP_ARGS(crtc, frame, scanline_end), - - TP_STRUCT__entry( - __field(enum pipe, pipe) - __field(u32, frame) - __field(u32, scanline) - ), - - TP_fast_assign( - __entry->pipe = crtc->pipe; - __entry->frame = frame; - __entry->scanline = scanline_end; - ), - - TP_printk("pipe %c, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->frame, - __entry->scanline) -); - -/* frontbuffer tracking */ - -TRACE_EVENT(intel_frontbuffer_invalidate, - TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(frontbuffer_bits, origin), - - TP_STRUCT__entry( - __field(unsigned int, frontbuffer_bits) - __field(unsigned int, origin) - ), - - TP_fast_assign( - __entry->frontbuffer_bits = frontbuffer_bits; - __entry->origin = origin; - ), - - TP_printk("frontbuffer_bits=0x%08x, origin=%u", - __entry->frontbuffer_bits, __entry->origin) -); - -TRACE_EVENT(intel_frontbuffer_flush, - TP_PROTO(unsigned int frontbuffer_bits, unsigned int origin), - TP_ARGS(frontbuffer_bits, origin), - - TP_STRUCT__entry( - __field(unsigned int, frontbuffer_bits) - __field(unsigned int, origin) - ), - - TP_fast_assign( - __entry->frontbuffer_bits = frontbuffer_bits; - __entry->origin = origin; - ), - - TP_printk("frontbuffer_bits=0x%08x, origin=%u", - __entry->frontbuffer_bits, __entry->origin) -); - /* object tracking */ TRACE_EVENT(i915_gem_object_create, @@ -794,7 +298,6 @@ DECLARE_EVENT_CLASS(i915_request, TP_STRUCT__entry( __field(u32, dev) __field(u64, ctx) - __field(u32, guc_id) __field(u16, class) __field(u16, instance) __field(u32, seqno) @@ -805,16 +308,14 @@ DECLARE_EVENT_CLASS(i915_request, __entry->dev = rq->engine->i915->drm.primary->index; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; - __entry->guc_id = rq->context->guc_id; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->tail = rq->tail; ), - TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u", __entry->dev, __entry->class, __entry->instance, - __entry->guc_id, __entry->ctx, __entry->seqno, - __entry->tail) + __entry->ctx, __entry->seqno, __entry->tail) ); DEFINE_EVENT(i915_request, i915_request_add, @@ -903,23 +404,19 @@ DECLARE_EVENT_CLASS(intel_context, __field(u32, guc_id) __field(int, pin_count) __field(u32, sched_state) - __field(u32, guc_sched_state_no_lock) __field(u8, guc_prio) ), TP_fast_assign( - __entry->guc_id = ce->guc_id; + __entry->guc_id = ce->guc_id.id; __entry->pin_count = atomic_read(&ce->pin_count); __entry->sched_state = ce->guc_state.sched_state; - __entry->guc_sched_state_no_lock = - atomic_read(&ce->guc_sched_state_no_lock); - __entry->guc_prio = ce->guc_prio; + __entry->guc_prio = ce->guc_state.prio; ), - TP_printk("guc_id=%d, pin_count=%d 
sched_state=0x%x,0x%x, guc_prio=%u", + TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u", __entry->guc_id, __entry->pin_count, __entry->sched_state, - __entry->guc_sched_state_no_lock, __entry->guc_prio) ); @@ -1246,7 +743,7 @@ DECLARE_EVENT_CLASS(i915_context, TP_fast_assign( __entry->dev = ctx->i915->drm.primary->index; __entry->ctx = ctx; - __entry->vm = rcu_access_pointer(ctx->vm); + __entry->vm = ctx->vm; ), TP_printk("dev=%u, ctx=%p, ctx_vm=%p", @@ -1267,5 +764,7 @@ DEFINE_EVENT(i915_context, i915_context_free, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 +#define TRACE_INCLUDE_FILE i915_trace #include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c index 6877362f6b85..d59fbb019032 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -126,12 +126,30 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man, kfree(bman_res); } +static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man, + struct drm_printer *printer) +{ + struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); + struct i915_buddy_block *block; + + mutex_lock(&bman->lock); + drm_printf(printer, "default_page_size: %lluKiB\n", + bman->default_page_size >> 10); + + i915_buddy_print(&bman->mm, printer); + + drm_printf(printer, "reserved:\n"); + list_for_each_entry(block, &bman->reserved, link) + i915_buddy_block_print(&bman->mm, block, printer); + mutex_unlock(&bman->lock); +} + static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = { .alloc = i915_ttm_buddy_man_alloc, .free = i915_ttm_buddy_man_free, + .debug = i915_ttm_buddy_man_debug, }; - /** * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager * @bdev: The ttm device diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 5259edacde38..7a5925072466 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -30,6 +30,7 @@ #include <linux/sched.h> #include <linux/types.h> #include <linux/workqueue.h> +#include <linux/sched/clock.h> struct drm_i915_private; struct timer_list; @@ -458,17 +459,4 @@ static inline bool timer_expired(const struct timer_list *t) return timer_active(t) && !timer_pending(t); } -/* - * This is a lookalike for IS_ENABLED() that takes a kconfig value, - * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero - * i.e. whether the configuration is active. Wrapping up the config inside - * a boolean context prevents clang and smatch from complaining about potential - * issues in confusing logical-&& with bitwise-& for constants. - * - * Sadly IS_ENABLED() itself does not work with kconfig values. - * - * Returns 0 if @config is 0, 1 if set to any value. 
- */ -#define IS_ACTIVE(config) ((config) != 0) - #endif /* !__I915_UTILS_H */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 4b7fc4647e46..927f0d4f8e11 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -40,12 +40,12 @@ static struct kmem_cache *slab_vmas; -struct i915_vma *i915_vma_alloc(void) +static struct i915_vma *i915_vma_alloc(void) { return kmem_cache_zalloc(slab_vmas, GFP_KERNEL); } -void i915_vma_free(struct i915_vma *vma) +static void i915_vma_free(struct i915_vma *vma) { return kmem_cache_free(slab_vmas, vma); } @@ -56,8 +56,6 @@ void i915_vma_free(struct i915_vma *vma) static void vma_print_allocator(struct i915_vma *vma, const char *reason) { - unsigned long *entries; - unsigned int nr_entries; char buf[512]; if (!vma->node.stack) { @@ -66,8 +64,7 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) return; } - nr_entries = stack_depot_fetch(vma->node.stack, &entries); - stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); + stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0); DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", vma->node.start, vma->node.size, reason, buf); } @@ -116,7 +113,6 @@ vma_create(struct drm_i915_gem_object *obj, vma->vm = i915_vm_get(vm); vma->ops = &vm->vma_ops; vma->obj = obj; - vma->resv = obj->base.resv; vma->size = obj->base.size; vma->display_alignment = I915_GTT_MIN_ALIGNMENT; @@ -349,7 +345,7 @@ int i915_vma_wait_for_bind(struct i915_vma *vma) fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); rcu_read_unlock(); if (fence) { - err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT); + err = dma_fence_wait(fence, true); dma_fence_put(fence); } } @@ -357,6 +353,32 @@ int i915_vma_wait_for_bind(struct i915_vma *vma) return err; } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +static int i915_vma_verify_bind_complete(struct i915_vma *vma) +{ + int err = 0; + + if (i915_active_has_exclusive(&vma->active)) { + struct dma_fence *fence = + i915_active_fence_get(&vma->active.excl); + + if (!fence) + return 0; + + if (dma_fence_is_signaled(fence)) + err = fence->error; + else + err = -EBUSY; + + dma_fence_put(fence); + } + + return err; +} +#else +#define i915_vma_verify_bind_complete(_vma) 0 +#endif + /** * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map @@ -426,11 +448,16 @@ int i915_vma_bind(struct i915_vma *vma, work->base.dma.error = 0; /* enable the queue_work() */ + __i915_gem_object_pin_pages(vma->obj); + work->pinned = i915_gem_object_get(vma->obj); + } else { if (vma->obj) { - __i915_gem_object_pin_pages(vma->obj); - work->pinned = i915_gem_object_get(vma->obj); + int ret; + + ret = i915_gem_object_wait_moving_fence(vma->obj, true); + if (ret) + return ret; } - } else { vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); } @@ -452,6 +479,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) GEM_BUG_ON(!i915_vma_is_ggtt(vma)); GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); + GEM_BUG_ON(i915_vma_verify_bind_complete(vma)); ptr = READ_ONCE(vma->iomap); if (ptr == NULL) { @@ -670,7 +698,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } color = 0; - if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) + if (i915_vm_has_cache_coloring(vma->vm)) color = vma->obj->cache_level; if (flags & PIN_OFFSET_FIXED) { @@ -795,17 +823,14 @@ unpinned: static int vma_get_pages(struct i915_vma *vma) { int err = 0; - bool pinned_pages = false; + bool pinned_pages = true; if (atomic_add_unless(&vma->pages_count, 1, 0)) return 0; - if (vma->obj) { - err = i915_gem_object_pin_pages(vma->obj); - if (err) - return err; - pinned_pages = true; - } + err = i915_gem_object_pin_pages(vma->obj); + if (err) + return err; /* Allocations ahoy! */ if (mutex_lock_interruptible(&vma->pages_mutex)) { @@ -838,8 +863,8 @@ static void __vma_put_pages(struct i915_vma *vma, unsigned int count) if (atomic_sub_return(count, &vma->pages_count) == 0) { vma->ops->clear_pages(vma); GEM_BUG_ON(vma->pages); - if (vma->obj) - i915_gem_object_unpin_pages(vma->obj); + + i915_gem_object_unpin_pages(vma->obj); } mutex_unlock(&vma->pages_mutex); } @@ -870,12 +895,13 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, u64 size, u64 alignment, u64 flags) { struct i915_vma_work *work = NULL; + struct dma_fence *moving = NULL; intel_wakeref_t wakeref = 0; unsigned int bound; int err; #ifdef CONFIG_PROVE_LOCKING - if (debug_locks && !WARN_ON(!ww) && vma->resv) + if (debug_locks && !WARN_ON(!ww)) assert_vma_held(vma); #endif @@ -895,7 +921,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (flags & PIN_GLOBAL) wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); - if (flags & vma->vm->bind_async_flags) { + moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL; + if (flags & vma->vm->bind_async_flags || moving) { /* lock VM */ err = i915_vm_lock_objects(vma->vm, ww); if (err) @@ -909,6 +936,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, work->vm = i915_vm_get(vma->vm); + dma_fence_work_chain(&work->base, moving); + /* Allocate enough page directories to used PTE */ if (vma->vm->allocate_va_range) { err = i915_vm_alloc_pt_stash(vma->vm, @@ -983,7 +1012,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!vma->pages); err = i915_vma_bind(vma, - vma->obj ? 
vma->obj->cache_level : 0, + vma->obj->cache_level, flags, work); if (err) goto err_remove; @@ -1013,7 +1042,10 @@ err_fence: err_rpm: if (wakeref) intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); + if (moving) + dma_fence_put(moving); vma_put_pages(vma); + return err; } @@ -1037,7 +1069,7 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!i915_vma_is_ggtt(vma)); #ifdef CONFIG_LOCKDEP - WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv)); + WARN_ON(!ww && dma_resv_held(vma->obj->base.resv)); #endif do { @@ -1116,6 +1148,7 @@ void i915_vma_reopen(struct i915_vma *vma) void i915_vma_release(struct kref *ref) { struct i915_vma *vma = container_of(ref, typeof(*vma), ref); + struct drm_i915_gem_object *obj = vma->obj; if (drm_mm_node_allocated(&vma->node)) { mutex_lock(&vma->vm->mutex); @@ -1126,15 +1159,11 @@ void i915_vma_release(struct kref *ref) } GEM_BUG_ON(i915_vma_is_active(vma)); - if (vma->obj) { - struct drm_i915_gem_object *obj = vma->obj; - - spin_lock(&obj->vma.lock); - list_del(&vma->obj_link); - if (!RB_EMPTY_NODE(&vma->obj_node)) - rb_erase(&vma->obj_node, &obj->vma.tree); - spin_unlock(&obj->vma.lock); - } + spin_lock(&obj->vma.lock); + list_del(&vma->obj_link); + if (!RB_EMPTY_NODE(&vma->obj_node)) + rb_erase(&vma->obj_node, &obj->vma.tree); + spin_unlock(&obj->vma.lock); __i915_vma_remove_closed(vma); i915_vm_put(vma->vm); @@ -1234,9 +1263,10 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) return i915_active_add_request(&vma->active, rq); } -int i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) +int _i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + struct dma_fence *fence, + unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; int err; @@ -1257,18 +1287,22 @@ int i915_vma_move_to_active(struct i915_vma *vma, intel_frontbuffer_put(front); } - dma_resv_add_excl_fence(vma->resv, &rq->fence); - obj->write_domain = I915_GEM_DOMAIN_RENDER; - obj->read_domains = 0; + if (fence) { + dma_resv_add_excl_fence(vma->obj->base.resv, fence); + obj->write_domain = I915_GEM_DOMAIN_RENDER; + obj->read_domains = 0; + } } else { if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_shared(vma->resv, 1); + err = dma_resv_reserve_shared(vma->obj->base.resv, 1); if (unlikely(err)) return err; } - dma_resv_add_shared_fence(vma->resv, &rq->fence); - obj->write_domain = 0; + if (fence) { + dma_resv_add_shared_fence(vma->obj->base.resv, fence); + obj->write_domain = 0; + } } if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index ed69f66c7ab0..4033aa08d5e4 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -57,9 +57,16 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma) int __must_check __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq); -int __must_check i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags); +int __must_check _i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + struct dma_fence *fence, + unsigned int flags); +static inline int __must_check +i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, + unsigned int flags) +{ + return _i915_vma_move_to_active(vma, rq, &rq->fence, flags); +} #define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter) @@ -227,16 +234,16 @@ static inline 
void __i915_vma_put(struct i915_vma *vma) kref_put(&vma->ref, i915_vma_release); } -#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) +#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv) static inline void i915_vma_lock(struct i915_vma *vma) { - dma_resv_lock(vma->resv, NULL); + dma_resv_lock(vma->obj->base.resv, NULL); } static inline void i915_vma_unlock(struct i915_vma *vma) { - dma_resv_unlock(vma->resv); + dma_resv_unlock(vma->obj->base.resv); } int __must_check @@ -411,9 +418,6 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma) list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \ for_each_until(!i915_vma_is_ggtt(V)) -struct i915_vma *i915_vma_alloc(void); -void i915_vma_free(struct i915_vma *vma); - struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma); void i915_vma_make_shrinkable(struct i915_vma *vma); void i915_vma_make_purgeable(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.c b/drivers/gpu/drm/i915/i915_vma_snapshot.c new file mode 100644 index 000000000000..2949ceea9884 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma_snapshot.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_vma_snapshot.h" +#include "i915_vma_types.h" +#include "i915_vma.h" + +/** + * i915_vma_snapshot_init - Initialize a struct i915_vma_snapshot from + * a struct i915_vma. + * @vsnap: The i915_vma_snapshot to init. + * @vma: A struct i915_vma used to initialize @vsnap. + * @name: Name associated with the snapshot. The character pointer needs to + * stay alive over the lifetime of the snapshot. + */ +void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name) +{ + if (!i915_vma_is_pinned(vma)) + assert_object_held(vma->obj); + + vsnap->name = name; + vsnap->size = vma->size; + vsnap->obj_size = vma->obj->base.size; + vsnap->gtt_offset = vma->node.start; + vsnap->gtt_size = vma->node.size; + vsnap->page_sizes = vma->page_sizes.gtt; + vsnap->pages = vma->pages; + vsnap->pages_rsgt = NULL; + vsnap->mr = NULL; + if (vma->obj->mm.rsgt) + vsnap->pages_rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt); + vsnap->mr = vma->obj->mm.region; + kref_init(&vsnap->kref); + vsnap->vma_resource = &vma->active; + vsnap->onstack = false; + vsnap->present = true; +} + +/** + * i915_vma_snapshot_init_onstack - Initialize a struct i915_vma_snapshot from + * a struct i915_vma, but avoid kfreeing it on last put. + * @vsnap: The i915_vma_snapshot to init. + * @vma: A struct i915_vma used to initialize @vsnap. + * @name: Name associated with the snapshot. 
The character pointer needs to + * stay alive over the lifetime of the snapshot. + */ +void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name) +{ + i915_vma_snapshot_init(vsnap, vma, name); + vsnap->onstack = true; +} + +static void vma_snapshot_release(struct kref *ref) +{ + struct i915_vma_snapshot *vsnap = + container_of(ref, typeof(*vsnap), kref); + + vsnap->present = false; + if (vsnap->pages_rsgt) + i915_refct_sgt_put(vsnap->pages_rsgt); + if (!vsnap->onstack) + kfree(vsnap); +} + +/** + * i915_vma_snapshot_put - Put an i915_vma_snapshot pointer reference + * @vsnap: The pointer reference + */ +void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap) +{ + kref_put(&vsnap->kref, vma_snapshot_release); +} + +/** + * i915_vma_snapshot_put_onstack - Put an onstack i915_vma_snapshot pointer + * reference and verify that the structure is released + * @vsnap: The pointer reference + * + * This function is intended to be paired with i915_vma_snapshot_init_onstack() + * and should be called before exiting the scope that declared @vsnap, or + * before freeing the structure that embeds it, to verify that all references + * have been released. + */ +void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap) +{ + if (!kref_put(&vsnap->kref, vma_snapshot_release)) + GEM_BUG_ON(1); +} + +/** + * i915_vma_snapshot_resource_pin - Temporarily block the memory the + * vma snapshot is pointing to from being released. + * @vsnap: The vma snapshot. + * @lockdep_cookie: Pointer to bool needed for lockdep support. This needs + * to be passed to the paired i915_vma_snapshot_resource_unpin. + * + * This function will temporarily try to hold up a fence or similar structure + * and will therefore enter a fence signaling critical section. + * + * Return: true if we succeeded in blocking the memory from being released, + * false otherwise. + */ +bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap, + bool *lockdep_cookie) +{ + bool pinned = i915_active_acquire_if_busy(vsnap->vma_resource); + + if (pinned) + *lockdep_cookie = dma_fence_begin_signalling(); + + return pinned; +} + +/** + * i915_vma_snapshot_resource_unpin - Unblock vma snapshot memory from + * being released. + * @vsnap: The vma snapshot. + * @lockdep_cookie: Cookie returned from matching i915_vma_snapshot_resource_pin(). + * + * Might leave a fence signalling critical section and signal a fence. + */ +void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap, + bool lockdep_cookie) +{ + dma_fence_end_signalling(lockdep_cookie); + + return i915_active_release(vsnap->vma_resource); +} diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.h b/drivers/gpu/drm/i915/i915_vma_snapshot.h new file mode 100644 index 000000000000..940581df4622 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma_snapshot.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef _I915_VMA_SNAPSHOT_H_ +#define _I915_VMA_SNAPSHOT_H_ + +#include <linux/kref.h> +#include <linux/slab.h> +#include <linux/types.h> + +struct i915_active; +struct i915_refct_sgt; +struct i915_vma; +struct intel_memory_region; +struct sg_table; + +/** + * DOC: Simple utilities for snapshotting GPU vma metadata, later used for + * error capture. We use a separate header for this to avoid issues due to + * recursive header includes. + */ + +/** + * struct i915_vma_snapshot - Snapshot of vma metadata. + * @name: Name associated with the snapshot. + * @size: The vma size in bytes. 
+ * @obj_size: The size of the underlying object in bytes. + * @gtt_offset: The gtt offset the vma is bound to. + * @gtt_size: The size in bytes allocated for the vma in the GTT. + * @pages: The struct sg_table pointing to the pages bound. + * @pages_rsgt: The refcounted sg_table holding the reference for @pages if any. + * @mr: The memory region of the bound pages. + * @kref: Reference for this structure. + * @vma_resource: FIXME: A means to keep the unbind fence from signaling. + * Temporarily while we have only sync unbinds, and still use the vma + * active, we use that. With async unbinding we need a signaling refcount + * for the unbind fence. + * @page_sizes: The vma GTT page sizes information. + * @onstack: Whether the structure shouldn't be freed on final put. + * @present: Whether the structure is present and initialized. + */ +struct i915_vma_snapshot { + const char *name; + size_t size; + size_t obj_size; + size_t gtt_offset; + size_t gtt_size; + struct sg_table *pages; + struct i915_refct_sgt *pages_rsgt; + struct intel_memory_region *mr; + struct kref kref; + struct i915_active *vma_resource; + u32 page_sizes; + bool onstack:1; + bool present:1; +}; + +void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name); + +void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name); + +void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap); + +void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap); + +bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap, + bool *lockdep_cookie); + +void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap, + bool lockdep_cookie); + +/** + * i915_vma_snapshot_alloc - Allocate a struct i915_vma_snapshot + * @gfp: Allocation mode. + * + * Return: A pointer to a struct i915_vma_snapshot if successful. + * NULL otherwise. + */ +static inline struct i915_vma_snapshot *i915_vma_snapshot_alloc(gfp_t gfp) +{ + return kmalloc(sizeof(struct i915_vma_snapshot), gfp); +} + +/** + * i915_vma_snapshot_get - Take a reference on a struct i915_vma_snapshot + * @vsnap: The snapshot to reference. + * + * Return: A pointer to a struct i915_vma_snapshot. + */ +static inline struct i915_vma_snapshot * +i915_vma_snapshot_get(struct i915_vma_snapshot *vsnap) +{ + kref_get(&vsnap->kref); + return vsnap; +} + +/** + * i915_vma_snapshot_present - Whether a struct i915_vma_snapshot is + * present and initialized. + * @vsnap: The snapshot to check. May be NULL. + * + * Return: true if present and initialized; false otherwise. 
+ */ +static inline bool +i915_vma_snapshot_present(const struct i915_vma_snapshot *vsnap) +{ + return vsnap && vsnap->present; +} + +#endif diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index 995b502d7e5d..f03fa96a1701 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -97,16 +97,26 @@ enum i915_cache_level; struct intel_remapped_plane_info { /* in gtt pages */ - u32 offset; - u16 width; - u16 height; - u16 src_stride; - u16 dst_stride; + u32 offset:31; + u32 linear:1; + union { + /* in gtt pages for !linear */ + struct { + u16 width; + u16 height; + u16 src_stride; + u16 dst_stride; + }; + + /* in gtt pages for linear */ + u32 size; + }; } __packed; struct intel_remapped_info { - struct intel_remapped_plane_info plane[2]; - u32 unused_mbz; + struct intel_remapped_plane_info plane[4]; + /* in gtt pages */ + u32 plane_alignment; } __packed; struct intel_rotation_info { @@ -129,7 +139,7 @@ static inline void assert_i915_gem_gtt_types(void) { BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16)); BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int)); - BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 3 * sizeof(u32) + 8 * sizeof(u16)); + BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16)); /* Check that rotation/remapped shares offsets for simplicity */ BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) != @@ -177,7 +187,6 @@ struct i915_vma { const struct i915_vma_ops *ops; struct drm_i915_gem_object *obj; - struct dma_resv *resv; /** Alias of obj->resv */ struct sg_table *pages; void __iomem *iomap; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 305facedd284..04fd266d70e2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -83,33 +83,26 @@ const char *intel_platform_name(enum intel_platform platform) return platform_names[platform]; } -static const char *iommu_name(void) -{ - const char *msg = "n/a"; - -#ifdef CONFIG_INTEL_IOMMU - msg = enableddisabled(intel_iommu_gfx_mapped); -#endif - - return msg; -} - void intel_device_info_print_static(const struct intel_device_info *info, struct drm_printer *p) { - if (info->graphics_rel) - drm_printf(p, "graphics version: %u.%02u\n", info->graphics_ver, info->graphics_rel); + if (info->graphics.rel) + drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver, + info->graphics.rel); else - drm_printf(p, "graphics version: %u\n", info->graphics_ver); + drm_printf(p, "graphics version: %u\n", info->graphics.ver); - if (info->media_rel) - drm_printf(p, "media version: %u.%02u\n", info->media_ver, info->media_rel); + if (info->media.rel) + drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel); else - drm_printf(p, "media version: %u\n", info->media_ver); + drm_printf(p, "media version: %u\n", info->media.ver); + + if (info->display.rel) + drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel); + else + drm_printf(p, "display version: %u\n", info->display.ver); - drm_printf(p, "display version: %u\n", info->display.ver); drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "iommu: %s\n", iommu_name()); drm_printf(p, "memory-regions: %x\n", info->memory_regions); drm_printf(p, "page-sizes: %x\n", info->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); @@ -177,6 
+170,10 @@ static const u16 subplatform_portf_ids[] = { INTEL_ICL_PORT_F_IDS(0), }; +static const u16 subplatform_rpls_ids[] = { + INTEL_RPLS_IDS(0), +}; + static bool find_devid(u16 id, const u16 *p, unsigned int num) { for (; num; num--, p++) { @@ -213,6 +210,9 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { mask = BIT(INTEL_SUBPLATFORM_PORTF); + } else if (find_devid(devid, subplatform_rpls_ids, + ARRAY_SIZE(subplatform_rpls_ids))) { + mask = BIT(INTEL_SUBPLATFORM_RPL_S); } if (IS_TIGERLAKE(i915)) { @@ -326,33 +326,33 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { drm_info(&dev_priv->drm, "Display fused off, disabling\n"); - info->pipe_mask = 0; - info->cpu_transcoder_mask = 0; + info->display.pipe_mask = 0; + info->display.cpu_transcoder_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); - info->pipe_mask &= ~BIT(PIPE_C); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + info->display.pipe_mask &= ~BIT(PIPE_C); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) { u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_A); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + info->display.pipe_mask &= ~BIT(PIPE_A); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_B); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_B); + info->display.pipe_mask &= ~BIT(PIPE_B); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_B); } if (dfsm & SKL_DFSM_PIPE_C_DISABLE) { - info->pipe_mask &= ~BIT(PIPE_C); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); + info->display.pipe_mask &= ~BIT(PIPE_C); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } if (DISPLAY_VER(dev_priv) >= 12 && (dfsm & TGL_DFSM_PIPE_D_DISABLE)) { - info->pipe_mask &= ~BIT(PIPE_D); - info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D); + info->display.pipe_mask &= ~BIT(PIPE_D); + info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_D); } if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) @@ -369,7 +369,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->display.has_dsc = 0; } - if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active()) { + if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active(dev_priv)) { drm_info(&dev_priv->drm, "Disabling ppGTT for VT-d support\n"); info->ppgtt_type = INTEL_PPGTT_NONE; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index d328bb95c49b..d9ac6540d058 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -110,6 +110,9 @@ enum intel_platform { #define INTEL_SUBPLATFORM_G10 0 #define INTEL_SUBPLATFORM_G11 1 +/* ADL-S */ +#define INTEL_SUBPLATFORM_RPL_S 0 + enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, @@ -133,6 +136,7 @@ enum intel_ppgtt_type { func(has_logical_ring_elsq); \ func(has_mslices); \ func(has_pooled_eu); \ + func(has_pxp); \ func(has_rc6); \ func(has_rc6p); \ func(has_rps); \ @@ -165,11 +169,14 @@ enum intel_ppgtt_type { func(overlay_needs_physical); \ func(supports_tv); +struct ip_version { + u8 ver; + u8 rel; +}; + struct intel_device_info { - u8 
graphics_ver; - u8 graphics_rel; - u8 media_ver; - u8 media_rel; + struct ip_version graphics; + struct ip_version media; intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ @@ -188,17 +195,17 @@ struct intel_device_info { u8 gt; /* GT number, 0 if undefined */ - u8 pipe_mask; - u8 cpu_transcoder_mask; - - u8 abox_mask; - #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); #undef DEFINE_FLAG struct { u8 ver; + u8 rel; + + u8 pipe_mask; + u8 cpu_transcoder_mask; + u8 abox_mask; #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 91866520c173..84bb212bae4b 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -5,7 +5,7 @@ #include "i915_drv.h" #include "intel_dram.h" -#include "intel_sideband.h" +#include "intel_pcode.h" struct dram_dimm_info { u16 size; @@ -244,7 +244,6 @@ static int skl_get_dram_info(struct drm_i915_private *i915) { struct dram_info *dram_info = &i915->dram_info; - u32 mem_freq_khz, val; int ret; dram_info->type = skl_get_dram_type(i915); @@ -255,17 +254,6 @@ skl_get_dram_info(struct drm_i915_private *i915) if (ret) return ret; - val = intel_uncore_read(&i915->uncore, - SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); - mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) * - SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000); - - if (dram_info->num_channels * mem_freq_khz == 0) { - drm_info(&i915->drm, - "Couldn't get system memory bandwidth\n"); - return -EINVAL; - } - return 0; } @@ -350,24 +338,10 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val) static int bxt_get_dram_info(struct drm_i915_private *i915) { struct dram_info *dram_info = &i915->dram_info; - u32 dram_channels; - u32 mem_freq_khz, val; - u8 num_active_channels, valid_ranks = 0; + u32 val; + u8 valid_ranks = 0; int i; - val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0); - mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) * - BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000); - - dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK; - num_active_channels = hweight32(dram_channels); - - if (mem_freq_khz * num_active_channels == 0) { - drm_info(&i915->drm, - "Couldn't get system memory bandwidth\n"); - return -EINVAL; - } - /* * Now read each DUNIT8/9/10/11 to check the rank of each dimms. 
*/ @@ -444,7 +418,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv) break; default: MISSING_CASE(val & 0xf); - return -1; + return -EINVAL; } } else { switch (val & 0xf) { @@ -462,7 +436,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv) break; default: MISSING_CASE(val & 0xf); - return -1; + return -EINVAL; } } diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index 779eb2fa90b6..b43121609e25 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -78,6 +78,18 @@ int intel_memory_region_reserve(struct intel_memory_region *mem, return i915_ttm_buddy_man_reserve(man, offset, size); } +void intel_memory_region_debug(struct intel_memory_region *mr, + struct drm_printer *printer) +{ + drm_printf(printer, "%s: ", mr->name); + + if (mr->region_private) + ttm_resource_manager_debug(mr->region_private, printer); + else + drm_printf(printer, "total:%pa, available:%pa bytes\n", + &mr->total, &mr->avail); +} + struct intel_memory_region * intel_memory_region_create(struct drm_i915_private *i915, resource_size_t start, @@ -114,7 +126,6 @@ intel_memory_region_create(struct drm_i915_private *i915, goto err_free; } - kref_init(&mem->kref); return mem; err_free: @@ -132,28 +143,17 @@ void intel_memory_region_set_name(struct intel_memory_region *mem, va_end(ap); } -static void __intel_memory_region_destroy(struct kref *kref) +void intel_memory_region_destroy(struct intel_memory_region *mem) { - struct intel_memory_region *mem = - container_of(kref, typeof(*mem), kref); + int ret = 0; if (mem->ops->release) - mem->ops->release(mem); + ret = mem->ops->release(mem); + GEM_WARN_ON(!list_empty_careful(&mem->objects.list)); mutex_destroy(&mem->objects.lock); - kfree(mem); -} - -struct intel_memory_region * -intel_memory_region_get(struct intel_memory_region *mem) -{ - kref_get(&mem->kref); - return mem; -} - -void intel_memory_region_put(struct intel_memory_region *mem) -{ - kref_put(&mem->kref, __intel_memory_region_destroy); + if (!ret) + kfree(mem); } /* Global memory region registration -- only slight layer inversions! 
*/ @@ -222,7 +222,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915) fetch_and_zero(&i915->mm.regions[i]); if (region) - intel_memory_region_put(region); + intel_memory_region_destroy(region); } } diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 1f2b96efa69d..5625c9c38993 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -6,7 +6,6 @@ #ifndef __INTEL_MEMORY_REGION_H__ #define __INTEL_MEMORY_REGION_H__ -#include <linux/kref.h> #include <linux/ioport.h> #include <linux/mutex.h> #include <linux/io-mapping.h> @@ -15,6 +14,7 @@ struct drm_i915_private; struct drm_i915_gem_object; +struct drm_printer; struct intel_memory_region; struct sg_table; struct ttm_resource; @@ -50,7 +50,7 @@ struct intel_memory_region_ops { unsigned int flags; int (*init)(struct intel_memory_region *mem); - void (*release)(struct intel_memory_region *mem); + int (*release)(struct intel_memory_region *mem); int (*init_object)(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, @@ -70,8 +70,6 @@ struct intel_memory_region { /* For fake LMEM */ struct drm_mm_node fake_mappable; - struct kref kref; - resource_size_t io_start; resource_size_t min_page_size; resource_size_t total; @@ -109,9 +107,7 @@ intel_memory_region_create(struct drm_i915_private *i915, u16 instance, const struct intel_memory_region_ops *ops); -struct intel_memory_region * -intel_memory_region_get(struct intel_memory_region *mem); -void intel_memory_region_put(struct intel_memory_region *mem); +void intel_memory_region_destroy(struct intel_memory_region *mem); int intel_memory_regions_hw_probe(struct drm_i915_private *i915); void intel_memory_regions_driver_release(struct drm_i915_private *i915); @@ -127,6 +123,9 @@ int intel_memory_region_reserve(struct intel_memory_region *mem, resource_size_t offset, resource_size_t size); +void intel_memory_region_debug(struct intel_memory_region *mr, + struct drm_printer *printer); + struct intel_memory_region * i915_gem_ttm_system_setup(struct drm_i915_private *i915, u16 type, u16 instance); diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index d1d4b97b86f5..da8f82c2342f 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -129,6 +129,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) return PCH_JSP; case INTEL_PCH_ADP_DEVICE_ID_TYPE: case INTEL_PCH_ADP2_DEVICE_ID_TYPE: + case INTEL_PCH_ADP3_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Alder Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ALDERLAKE_S(dev_priv) && !IS_ALDERLAKE_P(dev_priv)); diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 7c0d83d292dc..6bff77521094 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -57,6 +57,7 @@ enum intel_pch { #define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80 #define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180 +#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c new file mode 100644 index 000000000000..e8c886e4e78d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pcode.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 
2013-2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_pcode.h" + +static int gen6_check_mailbox_status(u32 mbox) +{ + switch (mbox & GEN6_PCODE_ERROR_MASK) { + case GEN6_PCODE_SUCCESS: + return 0; + case GEN6_PCODE_UNIMPLEMENTED_CMD: + return -ENODEV; + case GEN6_PCODE_ILLEGAL_CMD: + return -ENXIO; + case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: + case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: + return -EOVERFLOW; + case GEN6_PCODE_TIMEOUT: + return -ETIMEDOUT; + default: + MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK); + return 0; + } +} + +static int gen7_check_mailbox_status(u32 mbox) +{ + switch (mbox & GEN6_PCODE_ERROR_MASK) { + case GEN6_PCODE_SUCCESS: + return 0; + case GEN6_PCODE_ILLEGAL_CMD: + return -ENXIO; + case GEN7_PCODE_TIMEOUT: + return -ETIMEDOUT; + case GEN7_PCODE_ILLEGAL_DATA: + return -EINVAL; + case GEN11_PCODE_ILLEGAL_SUBCOMMAND: + return -ENXIO; + case GEN11_PCODE_LOCKED: + return -EBUSY; + case GEN11_PCODE_REJECTED: + return -EACCES; + case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: + return -EOVERFLOW; + default: + MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK); + return 0; + } +} + +static int __sandybridge_pcode_rw(struct drm_i915_private *i915, + u32 mbox, u32 *val, u32 *val1, + int fast_timeout_us, + int slow_timeout_ms, + bool is_read) +{ + struct intel_uncore *uncore = &i915->uncore; + + lockdep_assert_held(&i915->sb_lock); + + /* + * GEN6_PCODE_* are outside of the forcewake domain, we can use + * intel_uncore_read/write_fw variants to reduce the amount of work + * required when reading/writing. + */ + + if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) + return -EAGAIN; + + intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val); + intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? 
*val1 : 0); + intel_uncore_write_fw(uncore, + GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); + + if (__intel_wait_for_register_fw(uncore, + GEN6_PCODE_MAILBOX, + GEN6_PCODE_READY, 0, + fast_timeout_us, + slow_timeout_ms, + &mbox)) + return -ETIMEDOUT; + + if (is_read) + *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA); + if (is_read && val1) + *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1); + + if (GRAPHICS_VER(i915) > 6) + return gen7_check_mailbox_status(mbox); + else + return gen6_check_mailbox_status(mbox); +} + +int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, + u32 *val, u32 *val1) +{ + int err; + + mutex_lock(&i915->sb_lock); + err = __sandybridge_pcode_rw(i915, mbox, val, val1, + 500, 20, + true); + mutex_unlock(&i915->sb_lock); + + if (err) { + drm_dbg(&i915->drm, + "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", + mbox, __builtin_return_address(0), err); + } + + return err; +} + +int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, + u32 mbox, u32 val, + int fast_timeout_us, + int slow_timeout_ms) +{ + int err; + + mutex_lock(&i915->sb_lock); + err = __sandybridge_pcode_rw(i915, mbox, &val, NULL, + fast_timeout_us, slow_timeout_ms, + false); + mutex_unlock(&i915->sb_lock); + + if (err) { + drm_dbg(&i915->drm, + "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", + val, mbox, __builtin_return_address(0), err); + } + + return err; +} + +static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, + u32 request, u32 reply_mask, u32 reply, + u32 *status) +{ + *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL, + 500, 0, + true); + + return *status || ((request & reply_mask) == reply); +} + +/** + * skl_pcode_request - send PCODE request until acknowledgment + * @i915: device private + * @mbox: PCODE mailbox ID the request is targeted for + * @request: request ID + * @reply_mask: mask used to check for request acknowledgment + * @reply: value used to check for request acknowledgment + * @timeout_base_ms: timeout for polling with preemption enabled + * + * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE + * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. + * The request is acknowledged once the PCODE reply dword equals @reply after + * applying @reply_mask. Polling is first attempted with preemption enabled + * for @timeout_base_ms and if this times out for another 50 ms with + * preemption disabled. + * + * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some + * other error as reported by PCODE. + */ +int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms) +{ + u32 status; + int ret; + + mutex_lock(&i915->sb_lock); + +#define COND \ + skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status) + + /* + * Prime the PCODE by doing a request first. Normally it guarantees + * that a subsequent request, at most @timeout_base_ms later, succeeds. + * _wait_for() doesn't guarantee when its passed condition is evaluated + * first, so send the first request explicitly. + */ + if (COND) { + ret = 0; + goto out; + } + ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10); + if (!ret) + goto out; + + /* + * The above can time out if the number of requests was low (2 in the + * worst case) _and_ PCODE was busy for some reason even after a + * (queued) request and @timeout_base_ms delay. 
As a workaround retry + * the poll with preemption disabled to maximize the number of + * requests. Increase the timeout from @timeout_base_ms to 50ms to + * account for interrupts that could reduce the number of these + * requests, and for any quirks of the PCODE firmware that delays + * the request completion. + */ + drm_dbg_kms(&i915->drm, + "PCODE timeout, retrying with preemption disabled\n"); + drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3); + preempt_disable(); + ret = wait_for_atomic(COND, 50); + preempt_enable(); + +out: + mutex_unlock(&i915->sb_lock); + return ret ? ret : status; +#undef COND +} + +int intel_pcode_init(struct drm_i915_private *i915) +{ + int ret = 0; + + if (!IS_DGFX(i915)) + return ret; + + ret = skl_pcode_request(i915, DG1_PCODE_STATUS, + DG1_UNCORE_GET_INIT_STATUS, + DG1_UNCORE_INIT_STATUS_COMPLETE, + DG1_UNCORE_INIT_STATUS_COMPLETE, 180000); + + drm_dbg(&i915->drm, "PCODE init status %d\n", ret); + + if (ret) + drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n"); + + return ret; +} diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h new file mode 100644 index 000000000000..50806649d4b6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pcode.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2013-2021 Intel Corporation + */ + +#ifndef _INTEL_PCODE_H_ +#define _INTEL_PCODE_H_ + +#include <linux/types.h> + +struct drm_i915_private; + +int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, + u32 *val, u32 *val1); +int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, + u32 val, int fast_timeout_us, + int slow_timeout_ms); +#define sandybridge_pcode_write(i915, mbox, val) \ + sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0) + +int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms); + +int intel_pcode_init(struct drm_i915_private *i915); + +#endif /* _INTEL_PCODE_H */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 65bc3709f54c..434b1f8b7fe3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -36,7 +36,9 @@ #include "display/intel_atomic_plane.h" #include "display/intel_bw.h" #include "display/intel_de.h" +#include "display/intel_display_trace.h" #include "display/intel_display_types.h" +#include "display/intel_fb.h" #include "display/intel_fbc.h" #include "display/intel_sprite.h" #include "display/skl_universal_plane.h" @@ -46,9 +48,9 @@ #include "i915_drv.h" #include "i915_fixed.h" #include "i915_irq.h" -#include "i915_trace.h" +#include "intel_pcode.h" #include "intel_pm.h" -#include "intel_sideband.h" +#include "vlv_sideband.h" #include "../../../platform/x86/intel_ips.h" /* Stores plane specific WM parameters */ @@ -76,6 +78,8 @@ struct intel_wm_config { static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { + enum pipe pipe; + if (HAS_LLC(dev_priv)) { /* * WaCompressedResourceDisplayNewHashMode:skl,kbl @@ -89,6 +93,16 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) SKL_DE_COMPRESSED_HASH_MODE); } + for_each_pipe(dev_priv, pipe) { + /* + * "Plane N strech max must be programmed to 11b (x1) + * when Async flips are enabled on that plane." 
+ */ + if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active(dev_priv)) + intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1); + } + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); @@ -147,7 +161,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0883: bxt */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_DISABLE_DUMMY0); + DPFC_DISABLE_DUMMY0); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -881,9 +895,8 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) return enabled; } -static void pnv_update_wm(struct intel_crtc *unused_crtc) +static void pnv_update_wm(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; const struct cxsr_latency *latency; u32 reg; @@ -976,7 +989,7 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv, enum pipe pipe; for_each_pipe(dev_priv, pipe) - trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); + trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(wm->sr.plane, SR) | @@ -1008,7 +1021,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, enum pipe pipe; for_each_pipe(dev_priv, pipe) { - trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); + trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | @@ -1152,17 +1165,13 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, cpp = plane_state->hw.fb->format->cpp[0]; /* - * Not 100% sure which way ELK should go here as the - * spec only says CL/CTG should assume 32bpp and BW - * doesn't need to. But as these things followed the - * mobile vs. desktop lines on gen3 as well, let's - * assume ELK doesn't need this. + * WaUse32BppForSRWM:ctg,elk * - * The spec also fails to list such a restriction for - * the HPLL watermark, which seems a little strange. + * The spec fails to list this restriction for the + * HPLL watermark, which seems a little strange. * Let's use 32bpp for the HPLL watermark as well. 
*/ - if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY && + if (plane->id == PLANE_PRIMARY && level != G4X_WM_LEVEL_NORMAL) cpp = max(cpp, 4u); @@ -1376,8 +1385,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; - int num_active_planes = hweight8(crtc_state->active_planes & - ~BIT(PLANE_CURSOR)); + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); const struct g4x_pipe_wm *raw; const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; @@ -1417,7 +1425,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state, wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; wm_state->sr.fbc = raw->fbc; - wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY); + wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY); level = G4X_WM_LEVEL_HPLL; if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) @@ -1708,7 +1716,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; - unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); int num_active_planes = hweight8(active_planes); const int fifo_size = 511; int fifo_extra, fifo_left = fifo_size; @@ -1900,8 +1908,8 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state, struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; - int num_active_planes = hweight8(crtc_state->active_planes & - ~BIT(PLANE_CURSOR)); + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + int num_active_planes = hweight8(active_planes); bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi); const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; @@ -2253,9 +2261,8 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state, mutex_unlock(&dev_priv->wm.wm_mutex); } -static void i965_update_wm(struct intel_crtc *unused_crtc) +static void i965_update_wm(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; int srwm = 1; int cursor_sr = 16; @@ -2329,9 +2336,22 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) #undef FW_WM -static void i9xx_update_wm(struct intel_crtc *unused_crtc) +static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, + enum i9xx_plane_id i9xx_plane) +{ + struct intel_plane *plane; + + for_each_intel_plane(&i915->drm, plane) { + if (plane->id == PLANE_PRIMARY && + plane->i9xx_plane == i9xx_plane) + return intel_crtc_for_pipe(i915, plane->pipe); + } + + return NULL; +} + +static void i9xx_update_wm(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); const struct intel_watermark_params *wm_info; u32 fwater_lo; u32 fwater_hi; @@ -2347,8 +2367,11 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) else wm_info = &i830_a_wm_info; - fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A); - crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A); + if (DISPLAY_VER(dev_priv) == 2) + fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); + else + fifo_size = 
i9xx_get_fifo_size(dev_priv, PLANE_A); + crtc = intel_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; @@ -2374,8 +2397,11 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) if (DISPLAY_VER(dev_priv) == 2) wm_info = &i830_bc_wm_info; - fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B); - crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B); + if (DISPLAY_VER(dev_priv) == 2) + fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); + else + fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); + crtc = intel_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { const struct drm_display_mode *pipe_mode = &crtc->config->hw.pipe_mode; @@ -2475,9 +2501,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) intel_set_memory_cxsr(dev_priv, true); } -static void i845_update_wm(struct intel_crtc *unused_crtc) +static void i845_update_wm(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); struct intel_crtc *crtc; const struct drm_display_mode *pipe_mode; u32 fwater_lo; @@ -2490,7 +2515,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) pipe_mode = &crtc->config->hw.pipe_mode; planea_wm = intel_calculate_wm(pipe_mode->crtc_clock, &i845_wm_info, - dev_priv->display.get_fifo_size(dev_priv, PLANE_A), + i845_get_fifo_size(dev_priv, PLANE_A), 4, pessimal_latency_ns); fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; @@ -2859,6 +2884,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, u32 val; int ret, i; int level, max_level = ilk_wm_max_level(dev_priv); + int mult = IS_DG2(dev_priv) ? 2 : 1; /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ @@ -2872,13 +2898,13 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, return; } - wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; - wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK; - wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK; - wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK; + wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & + GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & + GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & + GEN9_MEM_LATENCY_LEVEL_MASK) * mult; /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ @@ -2891,13 +2917,13 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, return; } - wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; - wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK; - wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK; - wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & - GEN9_MEM_LATENCY_LEVEL_MASK; + wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & + GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & + GEN9_MEM_LATENCY_LEVEL_MASK) * mult; + wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & + GEN9_MEM_LATENCY_LEVEL_MASK) * mult; /* * If a level n (n > 1) has a 0us latency, all levels m (m >= n) @@ -3051,9 +3077,9 @@ 
static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. */ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); if (!changed) return; @@ -3357,13 +3383,8 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv, } /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ - /* - * FIXME this is racy. FBC might get enabled later. - * What we should check here is whether FBC can be - * enabled sometime later. - */ - if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled && - intel_fbc_is_active(dev_priv)) { + if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && + dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { for (level = 2; level <= max_level; level++) { struct intel_wm_level *wm = &merged->wm[level]; @@ -5083,6 +5104,18 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, } } +static bool icl_need_wm1_wa(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + /* + * Wa_1408961008:icl, ehl + * Wa_14012656716:tgl, adl + * Underruns with WM1+ disabled + */ + return DISPLAY_VER(i915) == 11 || + (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); +} + static int skl_allocate_plane_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) @@ -5253,11 +5286,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], total[plane_id], uv_total[plane_id]); - /* - * Wa_1408961008:icl, ehl - * Underruns with WM1+ disabled - */ - if (DISPLAY_VER(dev_priv) == 11 && + if (icl_need_wm1_wa(dev_priv, plane_id) && level == 1 && wm->wm[0].enable) { wm->wm[level].blocks = wm->wm[0].blocks; wm->wm[level].lines = wm->wm[0].lines; @@ -6832,7 +6861,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) for_each_plane_id_on_crtc(crtc, plane_id) raw->plane[plane_id] = active->wm.plane[plane_id]; - if (++level > max_level) + level = G4X_WM_LEVEL_SR; + if (level > max_level) goto out; raw = &crtc_state->wm.g4x.raw[level]; @@ -6841,7 +6871,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) raw->plane[PLANE_SPRITE0] = 0; raw->fbc = active->sr.fbc; - if (++level > max_level) + level = G4X_WM_LEVEL_HPLL; + if (level > max_level) goto out; raw = &crtc_state->wm.g4x.raw[level]; @@ -6850,6 +6881,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) raw->plane[PLANE_SPRITE0] = 0; raw->fbc = active->hpll.fbc; + level++; out: for_each_plane_id_on_crtc(crtc, plane_id) g4x_raw_plane_wm_set(crtc_state, level, @@ -6886,7 +6918,7 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, plane->pipe); + intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -7042,7 +7074,7 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) for_each_intel_plane(&dev_priv->drm, plane) { struct intel_crtc *crtc = - intel_get_crtc_for_pipe(dev_priv, 
plane->pipe); + intel_crtc_for_pipe(dev_priv, plane->pipe); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane_state *plane_state = @@ -7129,47 +7161,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); } -/** - * intel_update_watermarks - update FIFO watermark values based on current modes - * @crtc: the #intel_crtc on which to compute the WM - * - * Calculate watermark values for the various WM regs based on current mode - * and plane configuration. - * - * There are several cases to deal with here: - * - normal (i.e. non-self-refresh) - * - self-refresh (SR) mode - * - lines are large relative to FIFO size (buffer can hold up to 2) - * - lines are small relative to FIFO size (buffer can hold more than 2 - * lines), so need to account for TLB latency - * - * The normal calculation is: - * watermark = dotclock * bytes per pixel * latency - * where latency is platform & configuration dependent (we assume pessimal - * values here). - * - * The SR calculation is: - * watermark = (trunc(latency/line time)+1) * surface width * - * bytes per pixel - * where - * line time = htotal / dotclock - * surface width = hdisplay for normal plane and 64 for cursor - * and latency is assumed to be high, as above. - * - * The final value programmed to the register should always be rounded up, - * and include an extra 2 entries to account for clock crossings. - * - * We don't use the sprite, so we can ignore that. And on Crestline we have - * to set the non-SR watermarks to 8. - */ -void intel_update_watermarks(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (dev_priv->display.update_wm) - dev_priv->display.update_wm(crtc); -} -
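The kernel-doc removed above was the one place the legacy FIFO watermark math was spelled out, so a worked instance of its "normal" method is worth keeping beside the deletion. This is a sketch with illustrative numbers, not the driver's intel_calculate_wm(); DIV_ROUND_UP, DIV_ROUND_UP_ULL and NSEC_PER_SEC are the stock kernel helpers:

/*
 * watermark = dotclock * bytes per pixel * latency, rounded up to FIFO
 * entries, plus the extra 2 entries for clock crossings that the
 * removed comment calls for.
 */
static unsigned int wm_method1_entries(unsigned int dotclock_khz,
				       unsigned int cpp,
				       unsigned int latency_ns,
				       unsigned int line_size)
{
	unsigned long long bytes =
		(unsigned long long)dotclock_khz * 1000 * cpp * latency_ns;

	bytes = DIV_ROUND_UP_ULL(bytes, NSEC_PER_SEC);

	return DIV_ROUND_UP((unsigned int)bytes, line_size) + 2;
}

For a 100 MHz dot clock, 4 bytes per pixel, a pessimal 2000 ns latency and 64-byte FIFO lines: 800 bytes drain during the latency window, which rounds up to 13 entries, or 15 with the clock-crossing pad.

 void intel_enable_ipc(struct drm_i915_private *dev_priv) { u32 val; @@ -7461,7 +7452,7 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_1409120013:icl,ehl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /*Wa_14010594013:icl, ehl */ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, @@ -7470,11 +7461,11 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) { - /* Wa_1409120013:tgl,rkl,adl-s,dg1 */ + /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) + IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_1409825376:tgl (pre-prod)*/ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) @@ -7500,11 +7491,34 @@ static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) gen12lp_init_clock_gating(dev_priv); /* Wa_1409836686:dg1[a0] */ - if (IS_DG1_GT_STEP(dev_priv, STEP_A0, STEP_B0)) + if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | DPT_GATING_DIS); } +static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv) +{ + /* Wa_22010146351:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) + intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); +} + +static void dg2_init_clock_gating(struct 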
drm_i915_private *i915) +{ + /* Wa_22010954014:dg2_g10 */ + if (IS_DG2_G10(i915)) + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, + SGSI_SIDECLK_DIS); + + /* + * Wa_14010733611:dg2_g10 + * Wa_22010146351:dg2_g10 + */ + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, + SGR_DIS | SGGI_DIS); +} + static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) { if (!HAS_PCH_CNP(dev_priv)) @@ -7536,7 +7550,7 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: cfl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); } static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7548,12 +7562,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) FBC_LLC_FULLY_OPEN); /* WaDisableSDEUnitClockGating:kbl */ - if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* WaDisableGamClockGating:kbl */ - if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); @@ -7569,7 +7583,7 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: kbl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); } static void skl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7596,14 +7610,14 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: skl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); /* * WaFbcHighMemBwCorruptionAvoidance:skl * Display WA #0883: skl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_DISABLE_DUMMY0); + DPFC_DISABLE_DUMMY0); } static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7627,11 +7641,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) | BDW_DPRS_MASK_VBLANK_SRD); - - /* Undocumented but fixes async flip + VT-d corruption */ - if (intel_vtd_active()) - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), - HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1); } /* WaVSRefCountFullforceMissDisable:bdw */ @@ -7667,20 +7676,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { - enum pipe pipe; - /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | HSW_FBCQ_DIS); - for_each_pipe(dev_priv, pipe) { - /* Undocumented but fixes async flip + VT-d corruption */ - if (intel_vtd_active()) - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), - HSW_PRI_STRETCH_MAX_MASK, 
HSW_PRI_STRETCH_MAX_X1); - } - /* This is required by WaCatErrorRejectionIssue:hsw */ intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | @@ -7909,7 +7909,7 @@ static void i830_init_clock_gating(struct drm_i915_private *dev_priv) void intel_init_clock_gating(struct drm_i915_private *dev_priv) { - dev_priv->display.init_clock_gating(dev_priv); + dev_priv->clock_gating_funcs->init_clock_gating(dev_priv); } void intel_suspend_hw(struct drm_i915_private *dev_priv) @@ -7924,6 +7924,38 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) "No clock gating settings or workarounds applied.\n"); } +#define CG_FUNCS(platform) \ +static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \ + .init_clock_gating = platform##_init_clock_gating, \ +} + +CG_FUNCS(dg2); +CG_FUNCS(xehpsdv); +CG_FUNCS(adlp); +CG_FUNCS(dg1); +CG_FUNCS(gen12lp); +CG_FUNCS(icl); +CG_FUNCS(cfl); +CG_FUNCS(skl); +CG_FUNCS(kbl); +CG_FUNCS(bxt); +CG_FUNCS(glk); +CG_FUNCS(bdw); +CG_FUNCS(chv); +CG_FUNCS(hsw); +CG_FUNCS(ivb); +CG_FUNCS(vlv); +CG_FUNCS(gen6); +CG_FUNCS(ilk); +CG_FUNCS(g4x); +CG_FUNCS(i965gm); +CG_FUNCS(i965g); +CG_FUNCS(gen3); +CG_FUNCS(i85x); +CG_FUNCS(i830); +CG_FUNCS(nop); +#undef CG_FUNCS +
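Each CG_FUNCS(x) invocation above stamps out one const vtable; for example, CG_FUNCS(dg2); expands to:

static const struct drm_i915_clock_gating_funcs dg2_clock_gating_funcs = {
	.init_clock_gating = dg2_init_clock_gating,
};

Replacing a writable function pointer in dev_priv->display with a pointer to one of these const structs lets the hook tables live in read-only data, which is the point of the conversion below.

 /** * intel_init_clock_gating_hooks - setup the clock gating hooks * @dev_priv: device private @@ -7935,56 +7967,105 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_ALDERLAKE_P(dev_priv)) - dev_priv->display.init_clock_gating = adlp_init_clock_gating; + if (IS_DG2(dev_priv)) + dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs; + else if (IS_XEHPSDV(dev_priv)) + dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs; + else if (IS_ALDERLAKE_P(dev_priv)) + dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs; else if (IS_DG1(dev_priv)) - dev_priv->display.init_clock_gating = dg1_init_clock_gating; + dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 12) - dev_priv->display.init_clock_gating = gen12lp_init_clock_gating; + dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 11) - dev_priv->display.init_clock_gating = icl_init_clock_gating; + dev_priv->clock_gating_funcs = &icl_clock_gating_funcs; else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) - dev_priv->display.init_clock_gating = cfl_init_clock_gating; + dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs; else if (IS_SKYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = skl_init_clock_gating; + dev_priv->clock_gating_funcs = &skl_clock_gating_funcs; else if (IS_KABYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = kbl_init_clock_gating; + dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs; else if (IS_BROXTON(dev_priv)) - dev_priv->display.init_clock_gating = bxt_init_clock_gating; + dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs; else if (IS_GEMINILAKE(dev_priv)) - dev_priv->display.init_clock_gating = glk_init_clock_gating; + dev_priv->clock_gating_funcs = &glk_clock_gating_funcs; else if (IS_BROADWELL(dev_priv)) - dev_priv->display.init_clock_gating = bdw_init_clock_gating; + dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs; else if (IS_CHERRYVIEW(dev_priv)) - dev_priv->display.init_clock_gating = chv_init_clock_gating; + dev_priv->clock_gating_funcs = &chv_clock_gating_funcs; else if 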
(IS_HASWELL(dev_priv)) - dev_priv->display.init_clock_gating = hsw_init_clock_gating; + dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs; else if (IS_IVYBRIDGE(dev_priv)) - dev_priv->display.init_clock_gating = ivb_init_clock_gating; + dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs; else if (IS_VALLEYVIEW(dev_priv)) - dev_priv->display.init_clock_gating = vlv_init_clock_gating; + dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 6) - dev_priv->display.init_clock_gating = gen6_init_clock_gating; + dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 5) - dev_priv->display.init_clock_gating = ilk_init_clock_gating; + dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs; else if (IS_G4X(dev_priv)) - dev_priv->display.init_clock_gating = g4x_init_clock_gating; + dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs; else if (IS_I965GM(dev_priv)) - dev_priv->display.init_clock_gating = i965gm_init_clock_gating; + dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs; else if (IS_I965G(dev_priv)) - dev_priv->display.init_clock_gating = i965g_init_clock_gating; + dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 3) - dev_priv->display.init_clock_gating = gen3_init_clock_gating; + dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs; else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) - dev_priv->display.init_clock_gating = i85x_init_clock_gating; + dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 2) - dev_priv->display.init_clock_gating = i830_init_clock_gating; + dev_priv->clock_gating_funcs = &i830_clock_gating_funcs; else { MISSING_CASE(INTEL_DEVID(dev_priv)); - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; } } +static const struct drm_i915_wm_disp_funcs skl_wm_funcs = { + .compute_global_watermarks = skl_compute_wm, +}; + +static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = { + .compute_pipe_wm = ilk_compute_pipe_wm, + .compute_intermediate_wm = ilk_compute_intermediate_wm, + .initial_watermarks = ilk_initial_watermarks, + .optimize_watermarks = ilk_optimize_watermarks, +}; + +static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = { + .compute_pipe_wm = vlv_compute_pipe_wm, + .compute_intermediate_wm = vlv_compute_intermediate_wm, + .initial_watermarks = vlv_initial_watermarks, + .optimize_watermarks = vlv_optimize_watermarks, + .atomic_update_watermarks = vlv_atomic_update_fifo, +}; + +static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = { + .compute_pipe_wm = g4x_compute_pipe_wm, + .compute_intermediate_wm = g4x_compute_intermediate_wm, + .initial_watermarks = g4x_initial_watermarks, + .optimize_watermarks = g4x_optimize_watermarks, +}; + +static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = { + .update_wm = pnv_update_wm, +}; + +static const struct drm_i915_wm_disp_funcs i965_wm_funcs = { + .update_wm = i965_update_wm, +}; + +static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = { + .update_wm = i9xx_update_wm, +}; + +static const struct drm_i915_wm_disp_funcs i845_wm_funcs = { + .update_wm = i845_update_wm, +}; + +static const struct drm_i915_wm_disp_funcs nop_funcs = { +}; + /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_i915_private *dev_priv) { @@ -8000,7 +8081,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) /* For FIFO watermark updates */ if 
(DISPLAY_VER(dev_priv) >= 9) { skl_setup_wm_latency(dev_priv); - dev_priv->display.compute_global_watermarks = skl_compute_wm; + dev_priv->wm_disp = &skl_wm_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_setup_wm_latency(dev_priv); @@ -8008,31 +8089,19 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || (DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] && dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { - dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; - dev_priv->display.compute_intermediate_wm = - ilk_compute_intermediate_wm; - dev_priv->display.initial_watermarks = - ilk_initial_watermarks; - dev_priv->display.optimize_watermarks = - ilk_optimize_watermarks; + dev_priv->wm_disp = &ilk_wm_funcs; } else { drm_dbg_kms(&dev_priv->drm, "Failed to read display plane latency. " "Disable CxSR\n"); + dev_priv->wm_disp = &nop_funcs; } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_setup_wm_latency(dev_priv); - dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm; - dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm; - dev_priv->display.initial_watermarks = vlv_initial_watermarks; - dev_priv->display.optimize_watermarks = vlv_optimize_watermarks; - dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo; + dev_priv->wm_disp = &vlv_wm_funcs; } else if (IS_G4X(dev_priv)) { g4x_setup_wm_latency(dev_priv); - dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm; - dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm; - dev_priv->display.initial_watermarks = g4x_initial_watermarks; - dev_priv->display.optimize_watermarks = g4x_optimize_watermarks; + dev_priv->wm_disp = &g4x_wm_funcs; } else if (IS_PINEVIEW(dev_priv)) { if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), dev_priv->is_ddr3, @@ -8046,25 +8115,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->fsb_freq, dev_priv->mem_freq); /* Disable CxSR and never update its watermark again */ intel_set_memory_cxsr(dev_priv, false); - dev_priv->display.update_wm = NULL; + dev_priv->wm_disp = &nop_funcs; } else - dev_priv->display.update_wm = pnv_update_wm; + dev_priv->wm_disp = &pnv_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 4) { - dev_priv->display.update_wm = i965_update_wm; + dev_priv->wm_disp = &i965_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 3) { - dev_priv->display.update_wm = i9xx_update_wm; - dev_priv->display.get_fifo_size = i9xx_get_fifo_size; + dev_priv->wm_disp = &i9xx_wm_funcs; } else if (DISPLAY_VER(dev_priv) == 2) { - if (INTEL_NUM_PIPES(dev_priv) == 1) { - dev_priv->display.update_wm = i845_update_wm; - dev_priv->display.get_fifo_size = i845_get_fifo_size; - } else { - dev_priv->display.update_wm = i9xx_update_wm; - dev_priv->display.get_fifo_size = i830_get_fifo_size; - } + if (INTEL_NUM_PIPES(dev_priv) == 1) + dev_priv->wm_disp = &i845_wm_funcs; + else + dev_priv->wm_disp = &i9xx_wm_funcs; } else { drm_err(&dev_priv->drm, "unexpected fall-through in %s\n", __func__); + dev_priv->wm_disp = &nop_funcs; } } diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 91f23b7f0af2..990cdcaf85ce 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -8,7 +8,6 @@ #include <linux/types.h> -#include "display/intel_bw.h" #include "display/intel_display.h" #include "display/intel_global_state.h" @@ -19,6 +18,7 @@ struct drm_device; struct drm_i915_private; struct i915_request; struct 
intel_atomic_state; +struct intel_bw_state; struct intel_crtc; struct intel_crtc_state; struct intel_plane; @@ -29,7 +29,6 @@ struct skl_wm_level; void intel_init_clock_gating(struct drm_i915_private *dev_priv); void intel_suspend_hw(struct drm_i915_private *dev_priv); int ilk_wm_max_level(const struct drm_i915_private *dev_priv); -void intel_update_watermarks(struct intel_crtc *crtc); void intel_init_pm(struct drm_i915_private *dev_priv); void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/intel_pm_types.h new file mode 100644 index 000000000000..211632f58751 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pm_types.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_PM_TYPES_H__ +#define __INTEL_PM_TYPES_H__ + +#include <linux/types.h> + +#include "display/intel_display.h" + +enum intel_ddb_partitioning { + INTEL_DDB_PART_1_2, + INTEL_DDB_PART_5_6, /* IVB+ */ +}; + +struct ilk_wm_values { + u32 wm_pipe[3]; + u32 wm_lp[3]; + u32 wm_lp_spr[3]; + bool enable_fbc_wm; + enum intel_ddb_partitioning partitioning; +}; + +struct g4x_pipe_wm { + u16 plane[I915_MAX_PLANES]; + u16 fbc; +}; + +struct g4x_sr_wm { + u16 plane; + u16 cursor; + u16 fbc; +}; + +struct vlv_wm_ddl_values { + u8 plane[I915_MAX_PLANES]; +}; + +struct vlv_wm_values { + struct g4x_pipe_wm pipe[3]; + struct g4x_sr_wm sr; + struct vlv_wm_ddl_values ddl[3]; + u8 level; + bool cxsr; +}; + +struct g4x_wm_values { + struct g4x_pipe_wm pipe[2]; + struct g4x_sr_wm sr; + struct g4x_sr_wm hpll; + bool cxsr; + bool hpll_en; + bool fbc_en; +}; + +struct skl_ddb_entry { + u16 start, end; /* in number of blocks, 'end' is exclusive */ +}; + +static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry) +{ + return entry->end - entry->start; +} + +static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, + const struct skl_ddb_entry *e2) +{ + if (e1->start == e2->start && e1->end == e2->end) + return true; + + return false; +} + +#endif /* __INTEL_PM_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 98c7339bf8ba..f2b888c16958 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -104,19 +104,50 @@ int intel_region_ttm_init(struct intel_memory_region *mem) * memory region, and if it was registered with the TTM device, * removes that registration. */ -void intel_region_ttm_fini(struct intel_memory_region *mem) +int intel_region_ttm_fini(struct intel_memory_region *mem) { - int ret; + struct ttm_resource_manager *man = mem->region_private; + int ret = -EBUSY; + int count; + + /* + * Put the region's move fences. This releases requests that + * may hold on to contexts and vms that may hold on to buffer + * objects placed in this region. + */ + if (man) + ttm_resource_manager_cleanup(man); + + /* Flush objects from region. 
*/ + for (count = 0; count < 10; ++count) { + i915_gem_flush_free_objects(mem->i915); + + mutex_lock(&mem->objects.lock); + if (list_empty(&mem->objects.list)) + ret = 0; + mutex_unlock(&mem->objects.lock); + if (!ret) + break; + + msleep(20); + flush_delayed_work(&mem->i915->bdev.wq); + } + + /* If we leaked objects, don't free the region; that would be a use-after-free */ + if (ret || !man) + return ret; ret = i915_ttm_buddy_man_fini(&mem->i915->bdev, intel_region_to_ttm_type(mem)); GEM_WARN_ON(ret); mem->region_private = NULL; + + return ret; } /** - * intel_region_ttm_resource_to_st - Convert an opaque TTM resource manager resource - * to an sg_table. + * intel_region_ttm_resource_to_rsgt - + * Convert an opaque TTM resource manager resource to a refcounted sg_table. * @mem: The memory region. * @res: The resource manager resource obtained from the TTM resource manager. * @@ -126,17 +157,18 @@ void intel_region_ttm_fini(struct intel_memory_region *mem) * * Return: A malloced sg_table on success, an error pointer on failure. */ -struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem, - struct ttm_resource *res) +struct i915_refct_sgt * +intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, + struct ttm_resource *res) { if (mem->is_range_manager) { struct ttm_range_mgr_node *range_node = to_ttm_range_mgr_node(res); - return i915_sg_from_mm_node(&range_node->mm_nodes[0], - mem->region.start); + return i915_rsgt_from_mm_node(&range_node->mm_nodes[0], + mem->region.start); } else { - return i915_sg_from_buddy_resource(res, mem->region.start); + return i915_rsgt_from_buddy_resource(res, mem->region.start); } } diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h index 6f44075920f2..fdee5e7bd46c 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.h +++ b/drivers/gpu/drm/i915/intel_region_ttm.h @@ -20,10 +20,11 @@ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv); int intel_region_ttm_init(struct intel_memory_region *mem); -void intel_region_ttm_fini(struct intel_memory_region *mem); +int intel_region_ttm_fini(struct intel_memory_region *mem); -struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem, - struct ttm_resource *res); +struct i915_refct_sgt * +intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, + struct ttm_resource *res); void intel_region_ttm_resource_free(struct intel_memory_region *mem, struct ttm_resource *res);
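Because intel_region_ttm_fini() now returns an error instead of void, region teardown paths are expected to check the result before releasing the backing store; a hedged caller-side sketch (mock_region_release is illustrative, not the actual i915 call site):

static int mock_region_release(struct intel_memory_region *mem)
{
	int err;

	err = intel_region_ttm_fini(mem);
	/* a leaked object keeps the manager alive; freeing it here would be a use-after-free */
	if (err)
		drm_warn(&mem->i915->drm,
			 "memory region teardown leaked objects: %d\n", err);

	return err;
}

diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index eaf7688f517d..22dab36afcb6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -65,16 +65,6 @@ static noinline depot_stack_handle_t __save_depot_stack(void) return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); } -static void __print_depot_stack(depot_stack_handle_t stack, - char *buf, int sz, int indent) -{ - unsigned long *entries; - unsigned int nr_entries; - - nr_entries = stack_depot_fetch(stack, &entries); - stack_trace_snprint(buf, sz, entries, nr_entries, indent); -} - static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { spin_lock_init(&rpm->debug.lock); @@ -146,12 +136,12 @@ static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, if (!buf) return; - __print_depot_stack(stack, buf, PAGE_SIZE, 2); + stack_depot_snprint(stack, buf, PAGE_SIZE, 2); DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf); stack = 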
READ_ONCE(rpm->debug.last_release); if (stack) { - __print_depot_stack(stack, buf, PAGE_SIZE, 2); + stack_depot_snprint(stack, buf, PAGE_SIZE, 2); DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf); } @@ -183,12 +173,12 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p, return; if (dbg->last_acquire) { - __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2); + stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2); drm_printf(p, "Wakeref last acquired:\n%s", buf); } if (dbg->last_release) { - __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2); + stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2); drm_printf(p, "Wakeref last released:\n%s", buf); } @@ -203,7 +193,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p, rep = 1; while (i + 1 < dbg->count && dbg->owners[i + 1] == stack) rep++, i++; - __print_depot_stack(stack, buf, PAGE_SIZE, 2); + stack_depot_snprint(stack, buf, PAGE_SIZE, 2); drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf); } @@ -600,6 +590,9 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) pm_runtime_use_autosuspend(kdev); } + /* Enable by default */ + pm_runtime_allow(kdev); + /* * The core calls the driver load handler with an RPM reference held. * We drop that here and will reacquire it during unloading in diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 183ea2b187fe..47a85fab4130 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -8,8 +8,6 @@ #include <linux/types.h> -#include "display/intel_display.h" - #include "intel_wakeref.h" #include "i915_utils.h" diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c new file mode 100644 index 000000000000..5ba8490a31e6 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_sbi.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2013-2021 Intel Corporation + * + * LPT/WPT IOSF sideband. + */ + +#include "i915_drv.h" +#include "intel_sbi.h" + +/* SBI access */ +static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, + enum intel_sbi_destination destination, + u32 *val, bool is_read) +{ + struct intel_uncore *uncore = &i915->uncore; + u32 cmd; + + lockdep_assert_held(&i915->sb_lock); + + if (intel_wait_for_register_fw(uncore, + SBI_CTL_STAT, SBI_BUSY, 0, + 100)) { + drm_err(&i915->drm, + "timeout waiting for SBI to become ready\n"); + return -EBUSY; + } + + intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16); + intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 
0 : *val); + + if (destination == SBI_ICLK) + cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; + else + cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; + if (!is_read) + cmd |= BIT(8); + intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY); + + if (__intel_wait_for_register_fw(uncore, + SBI_CTL_STAT, SBI_BUSY, 0, + 100, 100, &cmd)) { + drm_err(&i915->drm, + "timeout waiting for SBI to complete read\n"); + return -ETIMEDOUT; + } + + if (cmd & SBI_RESPONSE_FAIL) { + drm_err(&i915->drm, "error during SBI read of reg %x\n", reg); + return -ENXIO; + } + + if (is_read) + *val = intel_uncore_read_fw(uncore, SBI_DATA); + + return 0; +} + +u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg, + enum intel_sbi_destination destination) +{ + u32 result = 0; + + intel_sbi_rw(i915, reg, destination, &result, true); + + return result; +} + +void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value, + enum intel_sbi_destination destination) +{ + intel_sbi_rw(i915, reg, destination, &value, false); +} diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h new file mode 100644 index 000000000000..f5a862210454 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_sbi.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2013-2021 Intel Corporation + */ + +#ifndef _INTEL_SBI_H_ +#define _INTEL_SBI_H_ + +#include <linux/types.h> + +struct drm_i915_private; + +enum intel_sbi_destination { + SBI_ICLK, + SBI_MPHY, +}; + +u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg, + enum intel_sbi_destination destination); +void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value, + enum intel_sbi_destination destination); + +#endif /* _INTEL_SBI_H_ */ diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c deleted file mode 100644 index e304bf44e1ff..000000000000 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ /dev/null @@ -1,577 +0,0 @@ -/* - * Copyright © 2013 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include <asm/iosf_mbi.h> - -#include "i915_drv.h" -#include "intel_sideband.h" - -/* - * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and - * VLV_VLV2_PUNIT_HAS_0.8.docx - */ - -/* Standard MMIO read, non-posted */ -#define SB_MRD_NP 0x00 -/* Standard MMIO write, non-posted */ -#define SB_MWR_NP 0x01 -/* Private register read, double-word addressing, non-posted */ -#define SB_CRRDDA_NP 0x06 -/* Private register write, double-word addressing, non-posted */ -#define SB_CRWRDA_NP 0x07 - -static void ping(void *info) -{ -} - -static void __vlv_punit_get(struct drm_i915_private *i915) -{ - iosf_mbi_punit_acquire(); - - /* - * Prevent the cpu from sleeping while we use this sideband, otherwise - * the punit may cause a machine hang. The issue appears to be isolated - * with changing the power state of the CPU package while changing - * the power state via the punit, and we have only observed it - * reliably on 4-core Baytail systems suggesting the issue is in the - * power delivery mechanism and likely to be be board/function - * specific. Hence we presume the workaround needs only be applied - * to the Valleyview P-unit and not all sideband communications. - */ - if (IS_VALLEYVIEW(i915)) { - cpu_latency_qos_update_request(&i915->sb_qos, 0); - on_each_cpu(ping, NULL, 1); - } -} - -static void __vlv_punit_put(struct drm_i915_private *i915) -{ - if (IS_VALLEYVIEW(i915)) - cpu_latency_qos_update_request(&i915->sb_qos, - PM_QOS_DEFAULT_VALUE); - - iosf_mbi_punit_release(); -} - -void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports) -{ - if (ports & BIT(VLV_IOSF_SB_PUNIT)) - __vlv_punit_get(i915); - - mutex_lock(&i915->sb_lock); -} - -void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports) -{ - mutex_unlock(&i915->sb_lock); - - if (ports & BIT(VLV_IOSF_SB_PUNIT)) - __vlv_punit_put(i915); -} - -static int vlv_sideband_rw(struct drm_i915_private *i915, - u32 devfn, u32 port, u32 opcode, - u32 addr, u32 *val) -{ - struct intel_uncore *uncore = &i915->uncore; - const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP); - int err; - - lockdep_assert_held(&i915->sb_lock); - if (port == IOSF_PORT_PUNIT) - iosf_mbi_assert_punit_acquired(); - - /* Flush the previous comms, just in case it failed last time. */ - if (intel_wait_for_register(uncore, - VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, - 5)) { - drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n", - is_read ? "read" : "write"); - return -EAGAIN; - } - - preempt_disable(); - - intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr); - intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val); - intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ, - (devfn << IOSF_DEVFN_SHIFT) | - (opcode << IOSF_OPCODE_SHIFT) | - (port << IOSF_PORT_SHIFT) | - (0xf << IOSF_BYTE_ENABLES_SHIFT) | - (0 << IOSF_BAR_SHIFT) | - IOSF_SB_BUSY); - - if (__intel_wait_for_register_fw(uncore, - VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, - 10000, 0, NULL) == 0) { - if (is_read) - *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA); - err = 0; - } else { - drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n", - is_read ? 
"read" : "write"); - err = -ETIMEDOUT; - } - - preempt_enable(); - - return err; -} - -u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr) -{ - u32 val = 0; - - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, - SB_CRRDDA_NP, addr, &val); - - return val; -} - -int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val) -{ - return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, - SB_CRWRDA_NP, addr, &val); -} - -u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg) -{ - u32 val = 0; - - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, - SB_CRRDDA_NP, reg, &val); - - return val; -} - -void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val) -{ - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, - SB_CRWRDA_NP, reg, &val); -} - -u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr) -{ - u32 val = 0; - - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC, - SB_CRRDDA_NP, addr, &val); - - return val; -} - -u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg) -{ - u32 val = 0; - - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, - SB_CRRDDA_NP, reg, &val); - - return val; -} - -void vlv_iosf_sb_write(struct drm_i915_private *i915, - u8 port, u32 reg, u32 val) -{ - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, - SB_CRWRDA_NP, reg, &val); -} - -u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg) -{ - u32 val = 0; - - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK, - SB_CRRDDA_NP, reg, &val); - - return val; -} - -void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val) -{ - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK, - SB_CRWRDA_NP, reg, &val); -} - -u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg) -{ - u32 val = 0; - - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU, - SB_CRRDDA_NP, reg, &val); - - return val; -} - -void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val) -{ - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU, - SB_CRWRDA_NP, reg, &val); -} - -static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy) -{ - /* - * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D) - * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C) - */ - if (IS_CHERRYVIEW(i915)) - return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO; - else - return IOSF_PORT_DPIO; -} - -u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) -{ - u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); - u32 val = 0; - - vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val); - - /* - * FIXME: There might be some registers where all 1's is a valid value, - * so ideally we should check the register offset instead... 
- */ - drm_WARN(&i915->drm, val == 0xffffffff, - "DPIO read pipe %c reg 0x%x == 0x%x\n", - pipe_name(pipe), reg, val); - - return val; -} - -void vlv_dpio_write(struct drm_i915_private *i915, - enum pipe pipe, int reg, u32 val) -{ - u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); - - vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val); -} - -u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg) -{ - u32 val = 0; - - vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP, - reg, &val); - return val; -} - -void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val) -{ - vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP, - reg, &val); -} - -/* SBI access */ -static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg, - enum intel_sbi_destination destination, - u32 *val, bool is_read) -{ - struct intel_uncore *uncore = &i915->uncore; - u32 cmd; - - lockdep_assert_held(&i915->sb_lock); - - if (intel_wait_for_register_fw(uncore, - SBI_CTL_STAT, SBI_BUSY, 0, - 100)) { - drm_err(&i915->drm, - "timeout waiting for SBI to become ready\n"); - return -EBUSY; - } - - intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16); - intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val); - - if (destination == SBI_ICLK) - cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD; - else - cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; - if (!is_read) - cmd |= BIT(8); - intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY); - - if (__intel_wait_for_register_fw(uncore, - SBI_CTL_STAT, SBI_BUSY, 0, - 100, 100, &cmd)) { - drm_err(&i915->drm, - "timeout waiting for SBI to complete read\n"); - return -ETIMEDOUT; - } - - if (cmd & SBI_RESPONSE_FAIL) { - drm_err(&i915->drm, "error during SBI read of reg %x\n", reg); - return -ENXIO; - } - - if (is_read) - *val = intel_uncore_read_fw(uncore, SBI_DATA); - - return 0; -} - -u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg, - enum intel_sbi_destination destination) -{ - u32 result = 0; - - intel_sbi_rw(i915, reg, destination, &result, true); - - return result; -} - -void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value, - enum intel_sbi_destination destination) -{ - intel_sbi_rw(i915, reg, destination, &value, false); -} - -static int gen6_check_mailbox_status(u32 mbox) -{ - switch (mbox & GEN6_PCODE_ERROR_MASK) { - case GEN6_PCODE_SUCCESS: - return 0; - case GEN6_PCODE_UNIMPLEMENTED_CMD: - return -ENODEV; - case GEN6_PCODE_ILLEGAL_CMD: - return -ENXIO; - case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: - case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: - return -EOVERFLOW; - case GEN6_PCODE_TIMEOUT: - return -ETIMEDOUT; - default: - MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK); - return 0; - } -} - -static int gen7_check_mailbox_status(u32 mbox) -{ - switch (mbox & GEN6_PCODE_ERROR_MASK) { - case GEN6_PCODE_SUCCESS: - return 0; - case GEN6_PCODE_ILLEGAL_CMD: - return -ENXIO; - case GEN7_PCODE_TIMEOUT: - return -ETIMEDOUT; - case GEN7_PCODE_ILLEGAL_DATA: - return -EINVAL; - case GEN11_PCODE_ILLEGAL_SUBCOMMAND: - return -ENXIO; - case GEN11_PCODE_LOCKED: - return -EBUSY; - case GEN11_PCODE_REJECTED: - return -EACCES; - case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: - return -EOVERFLOW; - default: - MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK); - return 0; - } -} - -static int __sandybridge_pcode_rw(struct drm_i915_private *i915, - u32 mbox, u32 *val, u32 *val1, - int fast_timeout_us, - int slow_timeout_ms, - bool is_read) -{ - struct intel_uncore *uncore = &i915->uncore; - - 
lockdep_assert_held(&i915->sb_lock); - - /* - * GEN6_PCODE_* are outside of the forcewake domain, we can use - * intel_uncore_read/write_fw variants to reduce the amount of work - * required when reading/writing. - */ - - if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) - return -EAGAIN; - - intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val); - intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0); - intel_uncore_write_fw(uncore, - GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - - if (__intel_wait_for_register_fw(uncore, - GEN6_PCODE_MAILBOX, - GEN6_PCODE_READY, 0, - fast_timeout_us, - slow_timeout_ms, - &mbox)) - return -ETIMEDOUT; - - if (is_read) - *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA); - if (is_read && val1) - *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1); - - if (GRAPHICS_VER(i915) > 6) - return gen7_check_mailbox_status(mbox); - else - return gen6_check_mailbox_status(mbox); -} - -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, - u32 *val, u32 *val1) -{ - int err; - - mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, val, val1, - 500, 20, - true); - mutex_unlock(&i915->sb_lock); - - if (err) { - drm_dbg(&i915->drm, - "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n", - mbox, __builtin_return_address(0), err); - } - - return err; -} - -int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, - u32 mbox, u32 val, - int fast_timeout_us, - int slow_timeout_ms) -{ - int err; - - mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, &val, NULL, - fast_timeout_us, slow_timeout_ms, - false); - mutex_unlock(&i915->sb_lock); - - if (err) { - drm_dbg(&i915->drm, - "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n", - val, mbox, __builtin_return_address(0), err); - } - - return err; -} - -static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, - u32 request, u32 reply_mask, u32 reply, - u32 *status) -{ - *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL, - 500, 0, - true); - - return *status || ((request & reply_mask) == reply); -} - -/** - * skl_pcode_request - send PCODE request until acknowledgment - * @i915: device private - * @mbox: PCODE mailbox ID the request is targeted for - * @request: request ID - * @reply_mask: mask used to check for request acknowledgment - * @reply: value used to check for request acknowledgment - * @timeout_base_ms: timeout for polling with preemption enabled - * - * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE - * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. - * The request is acknowledged once the PCODE reply dword equals @reply after - * applying @reply_mask. Polling is first attempted with preemption enabled - * for @timeout_base_ms and if this times out for another 50 ms with - * preemption disabled. - * - * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some - * other error as reported by PCODE. - */ -int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, - u32 reply_mask, u32 reply, int timeout_base_ms) -{ - u32 status; - int ret; - - mutex_lock(&i915->sb_lock); - -#define COND \ - skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status) - - /* - * Prime the PCODE by doing a request first. Normally it guarantees - * that a subsequent request, at most @timeout_base_ms later, succeeds. 
- * _wait_for() doesn't guarantee when its passed condition is evaluated - * first, so send the first request explicitly. - */ - if (COND) { - ret = 0; - goto out; - } - ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10); - if (!ret) - goto out; - - /* - * The above can time out if the number of requests was low (2 in the - * worst case) _and_ PCODE was busy for some reason even after a - * (queued) request and @timeout_base_ms delay. As a workaround retry - * the poll with preemption disabled to maximize the number of - * requests. Increase the timeout from @timeout_base_ms to 50ms to - * account for interrupts that could reduce the number of these - * requests, and for any quirks of the PCODE firmware that delays - * the request completion. - */ - drm_dbg_kms(&i915->drm, - "PCODE timeout, retrying with preemption disabled\n"); - drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3); - preempt_disable(); - ret = wait_for_atomic(COND, 50); - preempt_enable(); - -out: - mutex_unlock(&i915->sb_lock); - return ret ? ret : status; -#undef COND -} - -int intel_pcode_init(struct drm_i915_private *i915) -{ - int ret = 0; - - if (!IS_DGFX(i915)) - return ret; - - ret = skl_pcode_request(i915, DG1_PCODE_STATUS, - DG1_UNCORE_GET_INIT_STATUS, - DG1_UNCORE_INIT_STATUS_COMPLETE, - DG1_UNCORE_INIT_STATUS_COMPLETE, 180000); - - drm_dbg(&i915->drm, "PCODE init status %d\n", ret); - - if (ret) - drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n"); - - return ret; -} diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index 6cf967631395..a4b16b9e2e55 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -23,7 +23,8 @@ * use a macro to define these to make it easier to identify the platforms * where the two steppings can deviate. 
*/ -#define COMMON_STEP(x) .gt_step = STEP_##x, .display_step = STEP_##x +#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x +#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x static const struct intel_step_info skl_revids[] = { [0x6] = { COMMON_STEP(G0) }, @@ -33,13 +34,13 @@ static const struct intel_step_info skl_revids[] = { }; static const struct intel_step_info kbl_revids[] = { - [1] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [2] = { .gt_step = STEP_C0, .display_step = STEP_B0 }, - [3] = { .gt_step = STEP_D0, .display_step = STEP_B0 }, - [4] = { .gt_step = STEP_F0, .display_step = STEP_C0 }, - [5] = { .gt_step = STEP_C0, .display_step = STEP_B1 }, - [6] = { .gt_step = STEP_D1, .display_step = STEP_B1 }, - [7] = { .gt_step = STEP_G0, .display_step = STEP_C0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, + [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 }, + [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 }, + [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 }, + [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 }, + [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 }, }; static const struct intel_step_info bxt_revids[] = { @@ -63,16 +64,16 @@ static const struct intel_step_info jsl_ehl_revids[] = { }; static const struct intel_step_info tgl_uy_revids[] = { - [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [1] = { .gt_step = STEP_B0, .display_step = STEP_C0 }, - [2] = { .gt_step = STEP_B1, .display_step = STEP_C0 }, - [3] = { .gt_step = STEP_C0, .display_step = STEP_D0 }, + [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, + [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, + [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, }; /* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */ static const struct intel_step_info tgl_revids[] = { - [0] = { .gt_step = STEP_A0, .display_step = STEP_B0 }, - [1] = { .gt_step = STEP_B0, .display_step = STEP_D0 }, + [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 }, }; static const struct intel_step_info rkl_revids[] = { @@ -87,38 +88,38 @@ static const struct intel_step_info dg1_revids[] = { }; static const struct intel_step_info adls_revids[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A0, .display_step = STEP_A2 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_B0 }, - [0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, + [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_C0 }, }; static const struct intel_step_info adlp_revids[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 }, - [0xC] = { .gt_step = STEP_C0, .display_step = STEP_D0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x4] = { 
COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, + [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, }; static const struct intel_step_info xehpsdv_revids[] = { - [0x0] = { .gt_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A1 }, - [0x4] = { .gt_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0) }, + [0x1] = { COMMON_GT_MEDIA_STEP(A1) }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0) }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0) }, }; static const struct intel_step_info dg2_g10_revid_step_tbl[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A1, .display_step = STEP_A0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, }; static const struct intel_step_info dg2_g11_revid_step_tbl[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_B0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_C0 }, - [0x5] = { .gt_step = STEP_B1, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, + [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, }; void intel_step_init(struct drm_i915_private *i915) @@ -179,7 +180,7 @@ void intel_step_init(struct drm_i915_private *i915) if (!revids) return; - if (revid < size && revids[revid].gt_step != STEP_NONE) { + if (revid < size && revids[revid].graphics_step != STEP_NONE) { step = revids[revid]; } else { drm_warn(&i915->drm, "Unknown revid 0x%02x\n", revid); @@ -192,7 +193,7 @@ void intel_step_init(struct drm_i915_private *i915) * steppings in the array are not monotonically increasing, but * it's better than defaulting to 0. */ - while (revid < size && revids[revid].gt_step == STEP_NONE) + while (revid < size && revids[revid].graphics_step == STEP_NONE) revid++; if (revid < size) { @@ -201,12 +202,12 @@ void intel_step_init(struct drm_i915_private *i915) step = revids[revid]; } else { drm_dbg(&i915->drm, "Using future steppings\n"); - step.gt_step = STEP_FUTURE; + step.graphics_step = STEP_FUTURE; step.display_step = STEP_FUTURE; } } - if (drm_WARN_ON(&i915->drm, step.gt_step == STEP_NONE)) + if (drm_WARN_ON(&i915->drm, step.graphics_step == STEP_NONE)) return; RUNTIME_INFO(i915)->step = step; diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h index f6641e2a3c77..d71a99bd5179 100644 --- a/drivers/gpu/drm/i915/intel_step.h +++ b/drivers/gpu/drm/i915/intel_step.h @@ -11,8 +11,9 @@ struct drm_i915_private; struct intel_step_info { - u8 gt_step; + u8 graphics_step; u8 display_step; + u8 media_step; }; #define STEP_ENUM_VAL(name) STEP_##name, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 6b38bc2811c1..abdac78d3976 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -22,11 +22,11 @@ */ #include <linux/pm_runtime.h> -#include <asm/iosf_mbi.h> #include "gt/intel_lrc_reg.h" /* for shadow reg list */ #include "i915_drv.h" +#include "i915_iosf_mbi.h" #include "i915_trace.h" #include "i915_vgpu.h" #include "intel_pm.h" @@ -36,6 +36,12 @@ #define __raw_posting_read(...) 
((void)__raw_uncore_read32(__VA_ARGS__)) +static void +fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) +{ + uncore->fw_get_funcs->force_wake_get(uncore, fw_domains); +} + void intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug) { @@ -64,7 +70,7 @@ static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug) static const char * const forcewake_domain_names[] = { "render", - "blitter", + "gt", "media", "vdbox0", "vdbox1", @@ -248,7 +254,7 @@ fw_domain_put(const struct intel_uncore_forcewake_domain *d) } static void -fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) +fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; unsigned int tmp; @@ -340,7 +346,7 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore) static void fw_domains_get_with_thread_status(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - fw_domains_get(uncore, fw_domains); + fw_domains_get_normal(uncore, fw_domains); /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */ __gen6_gt_wait_for_thread_c0(uncore); @@ -396,7 +402,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) GEM_BUG_ON(!domain->wake_count); if (--domain->wake_count == 0) - uncore->funcs.force_wake_put(uncore, domain->mask); + fw_domains_put(uncore, domain->mask); spin_unlock_irqrestore(&uncore->lock, irqflags); @@ -454,7 +460,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore) fw = uncore->fw_domains_active; if (fw) - uncore->funcs.force_wake_put(uncore, fw); + fw_domains_put(uncore, fw); fw_domains_reset(uncore, uncore->fw_domains); assert_forcewakes_inactive(uncore); @@ -562,7 +568,7 @@ static void forcewake_early_sanitize(struct intel_uncore *uncore, intel_uncore_forcewake_reset(uncore); if (restore_forcewake) { spin_lock_irq(&uncore->lock); - uncore->funcs.force_wake_get(uncore, restore_forcewake); + fw_domains_get(uncore, restore_forcewake); if (intel_uncore_has_fifo(uncore)) uncore->fifo_count = fifo_free_entries(uncore); @@ -623,7 +629,7 @@ static void __intel_uncore_forcewake_get(struct intel_uncore *uncore, } if (fw_domains) - uncore->funcs.force_wake_get(uncore, fw_domains); + fw_domains_get(uncore, fw_domains); } /** @@ -644,7 +650,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore, { unsigned long irqflags; - if (!uncore->funcs.force_wake_get) + if (!uncore->fw_get_funcs) return; assert_rpm_wakelock_held(uncore->rpm); @@ -711,7 +717,7 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, { lockdep_assert_held(&uncore->lock); - if (!uncore->funcs.force_wake_get) + if (!uncore->fw_get_funcs) return; __intel_uncore_forcewake_get(uncore, fw_domains); @@ -733,7 +739,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, continue; } - uncore->funcs.force_wake_put(uncore, domain->mask); + fw_domains_put(uncore, domain->mask); } } @@ -750,7 +756,7 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore, { unsigned long irqflags; - if (!uncore->funcs.force_wake_put) + if (!uncore->fw_get_funcs) return; spin_lock_irqsave(&uncore->lock, irqflags); @@ -769,7 +775,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore, struct intel_uncore_forcewake_domain *domain; unsigned int tmp; - if (!uncore->funcs.force_wake_put) + if (!uncore->fw_get_funcs) return; fw_domains &= uncore->fw_domains; @@ -793,7 +799,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, { 
lockdep_assert_held(&uncore->lock); - if (!uncore->funcs.force_wake_put) + if (!uncore->fw_get_funcs) return; __intel_uncore_forcewake_put(uncore, fw_domains); @@ -801,7 +807,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, void assert_forcewakes_inactive(struct intel_uncore *uncore) { - if (!uncore->funcs.force_wake_get) + if (!uncore->fw_get_funcs) return; drm_WARN(&uncore->i915->drm, uncore->fw_domains_active, @@ -818,7 +824,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore, if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) return; - if (!uncore->funcs.force_wake_get) + if (!uncore->fw_get_funcs) return; spin_lock_irq(&uncore->lock); @@ -851,16 +857,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore, } /* We give fast paths for the really cool registers */ -#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) - -#define __gen6_reg_read_fw_domains(uncore, offset) \ -({ \ - enum forcewake_domains __fwd; \ - if (NEEDS_FORCE_WAKE(offset)) \ - __fwd = FORCEWAKE_RENDER; \ - else \ - __fwd = 0; \ - __fwd; \ +#define NEEDS_FORCE_WAKE(reg) ({ \ + u32 __reg = (reg); \ + __reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \ })
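The rewritten NEEDS_FORCE_WAKE() above is a GCC statement expression, so its reg argument is evaluated exactly once while the macro still yields a value. A toy illustration of the double-evaluation hazard this avoids (these two macros are illustrative, not the driver's):

#define PRED_TWICE(reg)	((reg) < 0x40000 || (reg) >= 0x1C0000)	/* expands reg twice */
#define PRED_ONCE(reg)	({ u32 __r = (reg); __r < 0x40000 || __r >= 0x1C0000; })

With a side-effecting argument such as *offsets++, PRED_TWICE() can end up testing two different offsets, while PRED_ONCE() cannot; 0x1C0000 stands in for the GEN11_BSD_RING_BASE bound noted in the tables below.

 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry) @@ -942,125 +941,146 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = { __fwd; \ }) -#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ - find_fw_domain(uncore, offset) - -#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \ - find_fw_domain(uncore, offset) - /* *Must* be sorted by offset! See intel_shadow_table_check(). */ -static const i915_reg_t gen8_shadowed_regs[] = { - RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ - GEN6_RPNSWREQ, /* 0xA008 */ - GEN6_RC_VIDEO_FREQ, /* 0xA00C */ - RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */ - RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */ - RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ +static const struct i915_range gen8_shadowed_regs[] = { + { .start = 0x2030, .end = 0x2030 }, + { .start = 0xA008, .end = 0xA00C }, + { .start = 0x12030, .end = 0x12030 }, + { .start = 0x1a030, .end = 0x1a030 }, + { .start = 0x22030, .end = 0x22030 }, /* TODO: Other registers are not yet used */ }; -static const i915_reg_t gen11_shadowed_regs[] = { - RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ - RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */ - GEN6_RPNSWREQ, /* 0xA008 */ - GEN6_RC_VIDEO_FREQ, /* 0xA00C */ - RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ - RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */ - RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */ - RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */ - RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ - RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */ - RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */ - RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */ - RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ - RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */ - /* TODO: Other registers are not yet used */ +static const struct i915_range gen11_shadowed_regs[] = { + { .start = 0x2030, .end = 0x2030 }, + { .start = 0x2550, .end = 0x2550 }, + { .start = 0xA008, .end = 0xA00C }, + { .start = 0x22030, .end = 0x22030 }, + { 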
.start = 0x22230, .end = 0x22230 }, + { .start = 0x22510, .end = 0x22550 }, + { .start = 0x1C0030, .end = 0x1C0030 }, + { .start = 0x1C0230, .end = 0x1C0230 }, + { .start = 0x1C0510, .end = 0x1C0550 }, + { .start = 0x1C4030, .end = 0x1C4030 }, + { .start = 0x1C4230, .end = 0x1C4230 }, + { .start = 0x1C4510, .end = 0x1C4550 }, + { .start = 0x1C8030, .end = 0x1C8030 }, + { .start = 0x1C8230, .end = 0x1C8230 }, + { .start = 0x1C8510, .end = 0x1C8550 }, + { .start = 0x1D0030, .end = 0x1D0030 }, + { .start = 0x1D0230, .end = 0x1D0230 }, + { .start = 0x1D0510, .end = 0x1D0550 }, + { .start = 0x1D4030, .end = 0x1D4030 }, + { .start = 0x1D4230, .end = 0x1D4230 }, + { .start = 0x1D4510, .end = 0x1D4550 }, + { .start = 0x1D8030, .end = 0x1D8030 }, + { .start = 0x1D8230, .end = 0x1D8230 }, + { .start = 0x1D8510, .end = 0x1D8550 }, }; -static const i915_reg_t gen12_shadowed_regs[] = { - RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ - RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */ - GEN6_RPNSWREQ, /* 0xA008 */ - GEN6_RC_VIDEO_FREQ, /* 0xA00C */ - RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ - RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */ - RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */ - RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */ - RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ - RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */ - RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */ - RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */ - RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ - RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */ - /* TODO: Other registers are not yet used */ +static const struct i915_range gen12_shadowed_regs[] = { + { .start = 0x2030, .end = 0x2030 }, + { .start = 0x2510, .end = 0x2550 }, + { .start = 0xA008, .end = 0xA00C }, + { .start = 0xA188, .end = 0xA188 }, + { .start = 0xA278, .end = 0xA278 }, + { .start = 0xA540, .end = 0xA56C }, + { .start = 0xC4C8, .end = 0xC4C8 }, + { .start = 0xC4D4, .end = 0xC4D4 }, + { .start = 0xC600, .end = 0xC600 }, + { .start = 0x22030, .end = 0x22030 }, + { .start = 0x22510, .end = 0x22550 }, + { .start = 0x1C0030, .end = 0x1C0030 }, + { .start = 0x1C0510, .end = 0x1C0550 }, + { .start = 0x1C4030, .end = 0x1C4030 }, + { .start = 0x1C4510, .end = 0x1C4550 }, + { .start = 0x1C8030, .end = 0x1C8030 }, + { .start = 0x1C8510, .end = 0x1C8550 }, + { .start = 0x1D0030, .end = 0x1D0030 }, + { .start = 0x1D0510, .end = 0x1D0550 }, + { .start = 0x1D4030, .end = 0x1D4030 }, + { .start = 0x1D4510, .end = 0x1D4550 }, + { .start = 0x1D8030, .end = 0x1D8030 }, + { .start = 0x1D8510, .end = 0x1D8550 }, + + /* + * The rest of these ranges are specific to Xe_HP and beyond, but + * are reserved/unused ranges on earlier gen12 platforms, so they can + * be safely added to the gen12 table. 
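Because every lookup binary-searches these tables, entries must stay sorted and non-overlapping; a self-contained sketch of the same range lookup, using plain libc bsearch() and a stand-in table type (the kernel uses its BSEARCH() macro with mmio_range_cmp(), below):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct range { uint32_t start, end; };  // stand-in for struct i915_range

    static int range_cmp(const void *key, const void *elem)
    {
            uint32_t offset = *(const uint32_t *)key;
            const struct range *r = elem;

            if (offset < r->start)
                    return -1;
            if (offset > r->end)
                    return 1;
            return 0;  // offset falls within [start, end]
    }

    static bool in_table(uint32_t offset, const struct range *tbl, size_t n)
    {
            // valid only while tbl is sorted by ascending start offset
            return bsearch(&offset, tbl, n, sizeof(*tbl), range_cmp) != NULL;
    }

As a worked case, offset 0x1C0530 falls inside the { .start = 0x1C0510, .end = 0x1C0550 } entry above, so a write there would be treated as shadowed and skip the forcewake dance.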
+ */ + { .start = 0x1E0030, .end = 0x1E0030 }, + { .start = 0x1E0510, .end = 0x1E0550 }, + { .start = 0x1E4030, .end = 0x1E4030 }, + { .start = 0x1E4510, .end = 0x1E4550 }, + { .start = 0x1E8030, .end = 0x1E8030 }, + { .start = 0x1E8510, .end = 0x1E8550 }, + { .start = 0x1F0030, .end = 0x1F0030 }, + { .start = 0x1F0510, .end = 0x1F0550 }, + { .start = 0x1F4030, .end = 0x1F4030 }, + { .start = 0x1F4510, .end = 0x1F4550 }, + { .start = 0x1F8030, .end = 0x1F8030 }, + { .start = 0x1F8510, .end = 0x1F8550 }, }; -static const i915_reg_t xehp_shadowed_regs[] = { - RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ - RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */ - GEN6_RPNSWREQ, /* 0xA008 */ - GEN6_RC_VIDEO_FREQ, /* 0xA00C */ - RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ - RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */ - RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */ - RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */ - RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ - RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */ - RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */ - RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ - RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */ - RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ - RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */ - RING_TAIL(XEHP_BSD5_RING_BASE), /* 0x1E0000 (base) */ - RING_EXECLIST_CONTROL(XEHP_BSD5_RING_BASE), /* 0x1E0550 */ - RING_TAIL(XEHP_BSD6_RING_BASE), /* 0x1E4000 (base) */ - RING_EXECLIST_CONTROL(XEHP_BSD6_RING_BASE), /* 0x1E4550 */ - RING_TAIL(XEHP_VEBOX3_RING_BASE), /* 0x1E8000 (base) */ - RING_EXECLIST_CONTROL(XEHP_VEBOX3_RING_BASE), /* 0x1E8550 */ - RING_TAIL(XEHP_BSD7_RING_BASE), /* 0x1F0000 (base) */ - RING_EXECLIST_CONTROL(XEHP_BSD7_RING_BASE), /* 0x1F0550 */ - RING_TAIL(XEHP_BSD8_RING_BASE), /* 0x1F4000 (base) */ - RING_EXECLIST_CONTROL(XEHP_BSD8_RING_BASE), /* 0x1F4550 */ - RING_TAIL(XEHP_VEBOX4_RING_BASE), /* 0x1F8000 (base) */ - RING_EXECLIST_CONTROL(XEHP_VEBOX4_RING_BASE), /* 0x1F8550 */ - /* TODO: Other registers are not yet used */ +static const struct i915_range dg2_shadowed_regs[] = { + { .start = 0x2030, .end = 0x2030 }, + { .start = 0x2510, .end = 0x2550 }, + { .start = 0xA008, .end = 0xA00C }, + { .start = 0xA188, .end = 0xA188 }, + { .start = 0xA278, .end = 0xA278 }, + { .start = 0xA540, .end = 0xA56C }, + { .start = 0xC4C8, .end = 0xC4C8 }, + { .start = 0xC4E0, .end = 0xC4E0 }, + { .start = 0xC600, .end = 0xC600 }, + { .start = 0xC658, .end = 0xC658 }, + { .start = 0x22030, .end = 0x22030 }, + { .start = 0x22510, .end = 0x22550 }, + { .start = 0x1C0030, .end = 0x1C0030 }, + { .start = 0x1C0510, .end = 0x1C0550 }, + { .start = 0x1C4030, .end = 0x1C4030 }, + { .start = 0x1C4510, .end = 0x1C4550 }, + { .start = 0x1C8030, .end = 0x1C8030 }, + { .start = 0x1C8510, .end = 0x1C8550 }, + { .start = 0x1D0030, .end = 0x1D0030 }, + { .start = 0x1D0510, .end = 0x1D0550 }, + { .start = 0x1D4030, .end = 0x1D4030 }, + { .start = 0x1D4510, .end = 0x1D4550 }, + { .start = 0x1D8030, .end = 0x1D8030 }, + { .start = 0x1D8510, .end = 0x1D8550 }, + { .start = 0x1E0030, .end = 0x1E0030 }, + { .start = 0x1E0510, .end = 0x1E0550 }, + { .start = 0x1E4030, .end = 0x1E4030 }, + { .start = 0x1E4510, .end = 0x1E4550 }, + { .start = 0x1E8030, .end = 0x1E8030 }, + { .start = 
0x1E8510, .end = 0x1E8550 }, + { .start = 0x1F0030, .end = 0x1F0030 }, + { .start = 0x1F0510, .end = 0x1F0550 }, + { .start = 0x1F4030, .end = 0x1F4030 }, + { .start = 0x1F4510, .end = 0x1F4550 }, + { .start = 0x1F8030, .end = 0x1F8030 }, + { .start = 0x1F8510, .end = 0x1F8550 }, }; -static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) +static int mmio_range_cmp(u32 key, const struct i915_range *range) { - u32 offset = i915_mmio_reg_offset(*reg); - - if (key < offset) + if (key < range->start) return -1; - else if (key > offset) + else if (key > range->end) return 1; else return 0; } -#define __is_X_shadowed(x) \ -static bool is_##x##_shadowed(u32 offset) \ -{ \ - const i915_reg_t *regs = x##_shadowed_regs; \ - return BSEARCH(offset, regs, ARRAY_SIZE(x##_shadowed_regs), \ - mmio_reg_cmp); \ -} +static bool is_shadowed(struct intel_uncore *uncore, u32 offset) +{ + if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table)) + return false; -__is_X_shadowed(gen8) -__is_X_shadowed(gen11) -__is_X_shadowed(gen12) -__is_X_shadowed(xehp) + return BSEARCH(offset, + uncore->shadowed_reg_table, + uncore->shadowed_reg_table_entries, + mmio_range_cmp); +} static enum forcewake_domains gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) @@ -1068,15 +1088,9 @@ gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) return FORCEWAKE_RENDER; } -#define __gen8_reg_write_fw_domains(uncore, offset) \ -({ \ - enum forcewake_domains __fwd; \ - if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ - __fwd = FORCEWAKE_RENDER; \ - else \ - __fwd = 0; \ - __fwd; \ -}) +static const struct intel_forcewake_range __gen6_fw_ranges[] = { + GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER), +}; /* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
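These forcewake tables pair an inclusive offset range with the domain mask that must be awake for accesses in it; find_fw_domain() binary-searches the table and returns the matching mask, or 0 when nothing matches. A small worked example with hypothetical entries, in the style of the tables below:

    static const struct intel_forcewake_range example_fw_ranges[] = {
            GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
            GEN_FW_RANGE(0x5200, 0x5fff, FORCEWAKE_RENDER),
            GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
    };

    // find_fw_domain(uncore, 0x5300) -> FORCEWAKE_RENDER
    // find_fw_domain(uncore, 0x4000) -> 0, no forcewake required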
*/ static const struct intel_forcewake_range __chv_fw_ranges[] = { @@ -1101,34 +1115,8 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = { #define __fwtable_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ - if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \ - __fwd = find_fw_domain(uncore, offset); \ - __fwd; \ -}) - -#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \ -({ \ - enum forcewake_domains __fwd = 0; \ - const u32 __offset = (offset); \ - if (!is_gen11_shadowed(__offset)) \ - __fwd = find_fw_domain(uncore, __offset); \ - __fwd; \ -}) - -#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \ -({ \ - enum forcewake_domains __fwd = 0; \ - const u32 __offset = (offset); \ - if (!is_gen12_shadowed(__offset)) \ - __fwd = find_fw_domain(uncore, __offset); \ - __fwd; \ -}) - -#define __xehp_fwtable_reg_write_fw_domains(uncore, offset) \ -({ \ - enum forcewake_domains __fwd = 0; \ const u32 __offset = (offset); \ - if (!is_xehp_shadowed(__offset)) \ + if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \ __fwd = find_fw_domain(uncore, __offset); \ __fwd; \ }) @@ -1605,7 +1593,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore, for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) fw_domain_arm_timer(domain); - uncore->funcs.force_wake_get(uncore, fw_domains); + fw_domains_get(uncore, fw_domains); } static inline void __force_wake_auto(struct intel_uncore *uncore, @@ -1621,35 +1609,30 @@ static inline void __force_wake_auto(struct intel_uncore *uncore, ___force_wake_auto(uncore, fw_domains); } -#define __gen_read(func, x) \ +#define __gen_fwtable_read(x) \ static u##x \ -func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ +fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \ +{ \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \ + fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \ if (fw_engine) \ __force_wake_auto(uncore, fw_engine); \ val = __raw_uncore_read##x(uncore, reg); \ GEN6_READ_FOOTER; \ } -#define __gen_reg_read_funcs(func) \ -static enum forcewake_domains \ -func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ - return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ -} \ -\ -__gen_read(func, 8) \ -__gen_read(func, 16) \ -__gen_read(func, 32) \ -__gen_read(func, 64) +static enum forcewake_domains +fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { + return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); +} -__gen_reg_read_funcs(gen12_fwtable); -__gen_reg_read_funcs(gen11_fwtable); -__gen_reg_read_funcs(fwtable); -__gen_reg_read_funcs(gen6); +__gen_fwtable_read(8) +__gen_fwtable_read(16) +__gen_fwtable_read(32) +__gen_fwtable_read(64) -#undef __gen_reg_read_funcs +#undef __gen_fwtable_read #undef GEN6_READ_FOOTER #undef GEN6_READ_HEADER @@ -1714,35 +1697,29 @@ __gen6_write(8) __gen6_write(16) __gen6_write(32) -#define __gen_write(func, x) \ +#define __gen_fwtable_write(x) \ static void \ -func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ +fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \ + fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \ if 
(fw_engine) \ __force_wake_auto(uncore, fw_engine); \ __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } -#define __gen_reg_write_funcs(func) \ -static enum forcewake_domains \ -func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ - return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ -} \ -\ -__gen_write(func, 8) \ -__gen_write(func, 16) \ -__gen_write(func, 32) +static enum forcewake_domains +fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) +{ + return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); +} -__gen_reg_write_funcs(xehp_fwtable); -__gen_reg_write_funcs(gen12_fwtable); -__gen_reg_write_funcs(gen11_fwtable); -__gen_reg_write_funcs(fwtable); -__gen_reg_write_funcs(gen8); +__gen_fwtable_write(8) +__gen_fwtable_write(16) +__gen_fwtable_write(32) -#undef __gen_reg_write_funcs +#undef __gen_fwtable_write #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER @@ -1866,6 +1843,18 @@ static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore) fw_domain_fini(uncore, d->id); } +static const struct intel_uncore_fw_get uncore_get_fallback = { + .force_wake_get = fw_domains_get_with_fallback +}; + +static const struct intel_uncore_fw_get uncore_get_normal = { + .force_wake_get = fw_domains_get_normal, +}; + +static const struct intel_uncore_fw_get uncore_get_thread_status = { + .force_wake_get = fw_domains_get_with_thread_status +}; + static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; @@ -1881,8 +1870,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask; int i; - uncore->funcs.force_wake_get = fw_domains_get_with_fallback; - uncore->funcs.force_wake_put = fw_domains_put; + uncore->fw_get_funcs = &uncore_get_fallback; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); @@ -1907,8 +1895,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); } } else if (IS_GRAPHICS_VER(i915, 9, 10)) { - uncore->funcs.force_wake_get = fw_domains_get_with_fallback; - uncore->funcs.force_wake_put = fw_domains_put; + uncore->fw_get_funcs = &uncore_get_fallback; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); @@ -1918,16 +1905,13 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { - uncore->funcs.force_wake_get = fw_domains_get; - uncore->funcs.force_wake_put = fw_domains_put; + uncore->fw_get_funcs = &uncore_get_normal; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { - uncore->funcs.force_wake_get = - fw_domains_get_with_thread_status; - uncore->funcs.force_wake_put = fw_domains_put; + uncore->fw_get_funcs = &uncore_get_thread_status; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); } else if (IS_IVYBRIDGE(i915)) { @@ -1942,9 +1926,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) * (correctly) interpreted by the test below as MT * forcewake being disabled. 
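Zooming out from the IVB quirk: the recurring shape of this hunk is that the old pair of writable callbacks, uncore->funcs.force_wake_get and uncore->funcs.force_wake_put, collapses into a single pointer to a shared const vtable, with fw_domains_put() now called directly since every platform used the same put path. In sketch form (types and helpers as introduced by this patch):

    static const struct intel_uncore_fw_get uncore_get_thread_status = {
            .force_wake_get = fw_domains_get_with_thread_status,
    };

    // per-platform init now just picks a table:
    uncore->fw_get_funcs = &uncore_get_thread_status;
    // ...and the put side calls fw_domains_put() directly, no hook.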
*/ - uncore->funcs.force_wake_get = - fw_domains_get_with_thread_status; - uncore->funcs.force_wake_put = fw_domains_put; + uncore->fw_get_funcs = &uncore_get_thread_status; /* We need to init first for ECOBUS access and then * determine later if we want to reinit, in case of MT access is @@ -1975,9 +1957,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE, FORCEWAKE_ACK); } } else if (GRAPHICS_VER(i915) == 6) { - uncore->funcs.force_wake_get = - fw_domains_get_with_thread_status; - uncore->funcs.force_wake_put = fw_domains_put; + uncore->fw_get_funcs = &uncore_get_thread_status; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } @@ -2001,6 +1981,12 @@ out: (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \ } +#define ASSIGN_SHADOW_TABLE(uncore, d) \ +{ \ + (uncore)->shadowed_reg_table = d; \ + (uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \ +} + static int i915_pmic_bus_access_notifier(struct notifier_block *nb, unsigned long action, void *data) { @@ -2034,7 +2020,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static int uncore_mmio_setup(struct intel_uncore *uncore) +int intel_uncore_setup_mmio(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -2067,7 +2053,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) return 0; } -static void uncore_mmio_cleanup(struct intel_uncore *uncore) +void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) { struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev); @@ -2111,40 +2097,42 @@ static int uncore_forcewake_init(struct intel_uncore *uncore) return ret; forcewake_early_sanitize(uncore, 0); + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); + if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); + ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); + ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); } else if (GRAPHICS_VER(i915) >= 12) { ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable); + ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); } else if (GRAPHICS_VER(i915) == 11) { ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); + ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); } else if (IS_GRAPHICS_VER(i915, 9, 10)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); + ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); - ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else if (IS_CHERRYVIEW(i915)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); + ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); - ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else if (GRAPHICS_VER(i915) == 8) { - ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); - 
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges); + ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); } else if (IS_VALLEYVIEW(i915)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); - ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else if (IS_GRAPHICS_VER(i915, 6, 7)) { + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); } uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; @@ -2158,10 +2146,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) struct drm_i915_private *i915 = uncore->i915; int ret; - ret = uncore_mmio_setup(uncore); - if (ret) - return ret; - /* * The boot firmware initializes local memory and assesses its health. * If memory training fails, the punit will have been instructed to @@ -2182,12 +2166,11 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) } else { ret = uncore_forcewake_init(uncore); if (ret) - goto out_mmio_cleanup; + return ret; } /* make sure fw funcs are set if and only if we have fw*/ - GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); - GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put); + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs); GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains); GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains); @@ -2205,11 +2188,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n"); return 0; - -out_mmio_cleanup: - uncore_mmio_cleanup(uncore); - - return ret; } /* @@ -2274,8 +2252,6 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore) intel_uncore_fw_domains_fini(uncore); iosf_mbi_punit_release(); } - - uncore_mmio_cleanup(uncore); } static const struct reg_whitelist { diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 3c0b0a8b5250..d1d17b04e29f 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -84,12 +84,12 @@ enum forcewake_domains { FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1, }; -struct intel_uncore_funcs { +struct intel_uncore_fw_get { void (*force_wake_get)(struct intel_uncore *uncore, enum forcewake_domains domains); - void (*force_wake_put)(struct intel_uncore *uncore, - enum forcewake_domains domains); +}; +struct intel_uncore_funcs { enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore, i915_reg_t r); enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore, @@ -119,6 +119,12 @@ struct intel_forcewake_range { enum forcewake_domains domains; }; +/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */ +struct i915_range { + u32 start; + u32 end; +}; + struct intel_uncore { void __iomem *regs; @@ -136,7 +142,15 @@ struct intel_uncore { const struct intel_forcewake_range *fw_domains_table; unsigned int fw_domains_table_entries; + /* + * Shadowed registers are special cases where we can safely write + * to the register *without* grabbing forcewake. 
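Combined with the forcewake range tables, this is what the consolidated write path keys off; a hedged sketch of what the __fwtable_reg_write_fw_domains() macro in intel_uncore.c boils down to (the real code is a statement-expression macro):

    static enum forcewake_domains
    write_fw_domains_sketch(struct intel_uncore *uncore, u32 offset)
    {
            // shadowed or out-of-range offsets need no forcewake
            if (!NEEDS_FORCE_WAKE(offset) || is_shadowed(uncore, offset))
                    return 0;
            return find_fw_domain(uncore, offset);
    }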
+ */ + const struct i915_range *shadowed_reg_table; + unsigned int shadowed_reg_table_entries; + struct notifier_block pmic_bus_access_nb; + const struct intel_uncore_fw_get *fw_get_funcs; struct intel_uncore_funcs funcs; unsigned int fifo_count; @@ -204,11 +218,13 @@ void intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); void intel_uncore_init_early(struct intel_uncore *uncore, struct drm_i915_private *i915); +int intel_uncore_setup_mmio(struct intel_uncore *uncore); int intel_uncore_init_mmio(struct intel_uncore *uncore); void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, struct intel_gt *gt); bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); +void intel_uncore_cleanup_mmio(struct intel_uncore *uncore); void intel_uncore_fini_mmio(struct intel_uncore *uncore); void intel_uncore_suspend(struct intel_uncore *uncore); void intel_uncore_resume_early(struct intel_uncore *uncore); diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 545c8f277c46..4f4c2e15e736 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -123,6 +123,12 @@ enum { __INTEL_WAKEREF_PUT_LAST_BIT__ }; +static inline void +intel_wakeref_might_get(struct intel_wakeref *wf) +{ + might_lock(&wf->mutex); +} + /** * intel_wakeref_put_flags: Release the wakeref * @wf: the wakeref @@ -170,6 +176,12 @@ intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay) FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay)); } +static inline void +intel_wakeref_might_put(struct intel_wakeref *wf) +{ + might_lock(&wf->mutex); +} + /** * intel_wakeref_lock: Lock the wakeref (mutex) * @wf: the wakeref diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c new file mode 100644 index 000000000000..15311eaed848 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2020 Intel Corporation. + */ +#include <linux/workqueue.h> +#include "intel_pxp.h" +#include "intel_pxp_irq.h" +#include "intel_pxp_session.h" +#include "intel_pxp_tee.h" +#include "gem/i915_gem_context.h" +#include "gt/intel_context.h" +#include "i915_drv.h" + +/** + * DOC: PXP + * + * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms. + * It allows execution and flip to display of protected (i.e. encrypted) + * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig. + * + * Objects can opt-in to PXP encryption at creation time via the + * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be + * correctly protected they must be used in conjunction with a context created + * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation + * of those two uapi flags for details and restrictions. + * + * Protected objects are tied to a pxp session; currently we only support one + * session, which i915 manages and whose index is available in the uapi + * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting + * protected objects. + * The session is invalidated by the HW when certain events occur (e.g. + * suspend/resume). When this happens, all the objects that were used with the + * session are marked as invalid and all contexts marked as using protected + * content are banned. 
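As a hedged illustration of the userspace side of the opt-in (a sketch, not part of this patch; names from the uapi this series adds, error handling elided, fd a hypothetical open DRM file descriptor):

    // needs <stdint.h>, <sys/ioctl.h> and the i915 uapi header
    struct drm_i915_gem_create_ext_protected_content pc = {
            .base.name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT,
    };
    struct drm_i915_gem_create_ext create = {
            .size = 4096,
            .extensions = (uintptr_t)&pc,
    };
    ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
    // on success, create.handle names an object tied to the current PXP key

For the resulting invalid objects and banned contexts: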
Any further attempt at using them in an execbuf call is + * rejected, while flips are converted to black frames. + * + * Some of the PXP setup operations are performed by the Management Engine, + * which is handled by the mei driver; communication between i915 and mei is + * performed via the mei_pxp component module. + */ + +struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp) +{ + return container_of(pxp, struct intel_gt, pxp); +} + +bool intel_pxp_is_enabled(const struct intel_pxp *pxp) +{ + return pxp->ce; +} + +bool intel_pxp_is_active(const struct intel_pxp *pxp) +{ + return pxp->arb_is_valid; +} + +/* KCR register definitions */ +#define KCR_INIT _MMIO(0x320f0) +/* Setting KCR Init bit is required after system boot */ +#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14) + +static void kcr_pxp_enable(struct intel_gt *gt) +{ + intel_uncore_write(gt->uncore, KCR_INIT, + _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES)); +} + +static void kcr_pxp_disable(struct intel_gt *gt) +{ + intel_uncore_write(gt->uncore, KCR_INIT, + _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES)); +} + +static int create_vcs_context(struct intel_pxp *pxp) +{ + static struct lock_class_key pxp_lock; + struct intel_gt *gt = pxp_to_gt(pxp); + struct intel_engine_cs *engine; + struct intel_context *ce; + int i; + + /* + * Find the first VCS engine present. We're guaranteed there is one + * if we're in this function due to the check in has_pxp + */ + for (i = 0, engine = NULL; !engine; i++) + engine = gt->engine_class[VIDEO_DECODE_CLASS][i]; + + GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS); + + ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K, + I915_GEM_HWS_PXP_ADDR, + &pxp_lock, "pxp_context"); + if (IS_ERR(ce)) { + drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n"); + return PTR_ERR(ce); + } + + pxp->ce = ce; + + return 0; +} + +static void destroy_vcs_context(struct intel_pxp *pxp) +{ + intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce)); +} + +void intel_pxp_init(struct intel_pxp *pxp) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + int ret; + + if (!HAS_PXP(gt->i915)) + return; + + mutex_init(&pxp->tee_mutex); + + /* + * we'll use the completion to check if there is a termination pending, + * so we start it as completed and we reinit it when a termination + * is triggered. + */ + init_completion(&pxp->termination); + complete_all(&pxp->termination); + + mutex_init(&pxp->arb_mutex); + INIT_WORK(&pxp->session_work, intel_pxp_session_work); + + ret = create_vcs_context(pxp); + if (ret) + return; + + ret = intel_pxp_tee_component_init(pxp); + if (ret) + goto out_context; + + drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n"); + + return; + +out_context: + destroy_vcs_context(pxp); +} + +void intel_pxp_fini(struct intel_pxp *pxp) +{ + if (!intel_pxp_is_enabled(pxp)) + return; + + pxp->arb_is_valid = false; + + intel_pxp_tee_component_fini(pxp); + + destroy_vcs_context(pxp); +} + +void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp) +{ + pxp->arb_is_valid = false; + reinit_completion(&pxp->termination); +} + +static void pxp_queue_termination(struct intel_pxp *pxp) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + + /* + * We want to get the same effect as if we received a termination + * interrupt, so just pretend that we did. 
+ */ + spin_lock_irq(&gt->irq_lock); + intel_pxp_mark_termination_in_progress(pxp); + pxp->session_events |= PXP_TERMINATION_REQUEST; + queue_work(system_unbound_wq, &pxp->session_work); + spin_unlock_irq(&gt->irq_lock); +} + +/* + * the arb session is restarted from the irq work when we receive the + * termination completion interrupt + */ +int intel_pxp_start(struct intel_pxp *pxp) +{ + int ret = 0; + + if (!intel_pxp_is_enabled(pxp)) + return -ENODEV; + + mutex_lock(&pxp->arb_mutex); + + if (pxp->arb_is_valid) + goto unlock; + + pxp_queue_termination(pxp); + + if (!wait_for_completion_timeout(&pxp->termination, + msecs_to_jiffies(250))) { + ret = -ETIMEDOUT; + goto unlock; + } + + /* make sure the compiler doesn't optimize the double access */ + barrier(); + + if (!pxp->arb_is_valid) + ret = -EIO; + +unlock: + mutex_unlock(&pxp->arb_mutex); + return ret; +} + +void intel_pxp_init_hw(struct intel_pxp *pxp) +{ + kcr_pxp_enable(pxp_to_gt(pxp)); + intel_pxp_irq_enable(pxp); +} + +void intel_pxp_fini_hw(struct intel_pxp *pxp) +{ + kcr_pxp_disable(pxp_to_gt(pxp)); + + intel_pxp_irq_disable(pxp); +} + +int intel_pxp_key_check(struct intel_pxp *pxp, + struct drm_i915_gem_object *obj, + bool assign) +{ + if (!intel_pxp_is_active(pxp)) + return -ENODEV; + + if (!i915_gem_object_is_protected(obj)) + return -EINVAL; + + GEM_BUG_ON(!pxp->key_instance); + + /* + * If this is the first time we're using this object, it's not + * encrypted yet; it will be encrypted with the current key, so mark it + * as such. If the object is already encrypted, check instead if the + * used key is still valid. + */ + if (!obj->pxp_key_instance && assign) + obj->pxp_key_instance = pxp->key_instance; + + if (obj->pxp_key_instance != pxp->key_instance) + return -ENOEXEC; + + return 0; +} + +void intel_pxp_invalidate(struct intel_pxp *pxp) +{ + struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915; + struct i915_gem_context *ctx, *cn; + + /* ban all contexts marked as protected */ + spin_lock_irq(&i915->gem.contexts.lock); + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { + struct i915_gem_engines_iter it; + struct intel_context *ce; + + if (!kref_get_unless_zero(&ctx->ref)) + continue; + + if (likely(!i915_gem_context_uses_protected_content(ctx))) { + i915_gem_context_put(ctx); + continue; + } + + spin_unlock_irq(&i915->gem.contexts.lock); + + /* + * By the time we get here we are either going to suspend with + * quiesced execution or the HW keys are already long gone and + * in this case it is worthless to attempt to close the context + * and wait for its execution. It will hang the GPU if it has + * not already. So, as a fast mitigation, we can ban the + * context as quickly as we can. That might race with the + * execbuffer, but currently this is the best that can be done. + */ + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) + intel_context_ban(ce, NULL); + i915_gem_context_unlock_engines(ctx); + + /* + * The context has been banned, no need to keep the wakeref. 
+ * This is safe from races because the only other place this + * is touched is context_release and we're holding a ctx ref + */ + if (ctx->pxp_wakeref) { + intel_runtime_pm_put(&i915->runtime_pm, + ctx->pxp_wakeref); + ctx->pxp_wakeref = 0; + } + + spin_lock_irq(&i915->gem.contexts.lock); + list_safe_reset_next(ctx, cn, link); + i915_gem_context_put(ctx); + } + spin_unlock_irq(&i915->gem.contexts.lock); +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h new file mode 100644 index 000000000000..73847e535cab --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. + */ + +#ifndef __INTEL_PXP_H__ +#define __INTEL_PXP_H__ + +#include <linux/errno.h> +#include <linux/types.h> + +struct intel_pxp; +struct drm_i915_gem_object; + +#ifdef CONFIG_DRM_I915_PXP +struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp); +bool intel_pxp_is_enabled(const struct intel_pxp *pxp); +bool intel_pxp_is_active(const struct intel_pxp *pxp); + +void intel_pxp_init(struct intel_pxp *pxp); +void intel_pxp_fini(struct intel_pxp *pxp); + +void intel_pxp_init_hw(struct intel_pxp *pxp); +void intel_pxp_fini_hw(struct intel_pxp *pxp); + +void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp); + +int intel_pxp_start(struct intel_pxp *pxp); + +int intel_pxp_key_check(struct intel_pxp *pxp, + struct drm_i915_gem_object *obj, + bool assign); + +void intel_pxp_invalidate(struct intel_pxp *pxp); +#else +static inline void intel_pxp_init(struct intel_pxp *pxp) +{ +} + +static inline void intel_pxp_fini(struct intel_pxp *pxp) +{ +} + +static inline int intel_pxp_start(struct intel_pxp *pxp) +{ + return -ENODEV; +} + +static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp) +{ + return false; +} + +static inline bool intel_pxp_is_active(const struct intel_pxp *pxp) +{ + return false; +} + +static inline int intel_pxp_key_check(struct intel_pxp *pxp, + struct drm_i915_gem_object *obj, + bool assign) +{ + return -ENODEV; +} +#endif + +#endif /* __INTEL_PXP_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c new file mode 100644 index 000000000000..f41e45763d0d --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. 
+ */ + +#include "intel_pxp.h" +#include "intel_pxp_cmd.h" +#include "intel_pxp_session.h" +#include "gt/intel_context.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" +#include "gt/intel_ring.h" + +#include "i915_trace.h" + +/* stall until prior PXP and MFX/HCP/HUC objects are completed */ +#define MFX_WAIT_PXP (MFX_WAIT | \ + MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \ + MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG) + +static u32 *pxp_emit_session_selection(u32 *cs, u32 idx) +{ + *cs++ = MFX_WAIT_PXP; + + /* pxp off */ + *cs++ = MI_FLUSH_DW; + *cs++ = 0; + *cs++ = 0; + + /* select session */ + *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx); + + *cs++ = MFX_WAIT_PXP; + + /* pxp on */ + *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN | + MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; + *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = 0; + + *cs++ = MFX_WAIT_PXP; + + return cs; +} + +static u32 *pxp_emit_inline_termination(u32 *cs) +{ + /* session inline termination */ + *cs++ = CRYPTO_KEY_EXCHANGE; + *cs++ = 0; + + return cs; +} + +static u32 *pxp_emit_session_termination(u32 *cs, u32 idx) +{ + cs = pxp_emit_session_selection(cs, idx); + cs = pxp_emit_inline_termination(cs); + + return cs; +} + +static u32 *pxp_emit_wait(u32 *cs) +{ + /* wait for cmds to go through */ + *cs++ = MFX_WAIT_PXP; + *cs++ = 0; + + return cs; +} + +/* + * if we ever need to terminate more than one session, we can submit multiple + * selections and terminations back-to-back with a single wait at the end + */ +#define SELECTION_LEN 10 +#define TERMINATION_LEN 2 +#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x)) +#define WAIT_LEN 2 + +static void pxp_request_commit(struct i915_request *rq) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX }; + struct intel_timeline * const tl = i915_request_timeline(rq); + + lockdep_unpin_lock(&tl->mutex, rq->cookie); + + trace_i915_request_add(rq); + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + + mutex_unlock(&tl->mutex); +} + +int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id) +{ + struct i915_request *rq; + struct intel_context *ce = pxp->ce; + u32 *cs; + int err = 0; + + if (!intel_pxp_is_enabled(pxp)) + return 0; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + if (ce->engine->emit_init_breadcrumb) { + err = ce->engine->emit_init_breadcrumb(rq); + if (err) + goto out_rq; + } + + cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto out_rq; + } + + cs = pxp_emit_session_termination(cs, id); + cs = pxp_emit_wait(cs); + + intel_ring_advance(rq, cs); + +out_rq: + i915_request_get(rq); + + if (unlikely(err)) + i915_request_set_error_once(rq, err); + + pxp_request_commit(rq); + + if (!err && i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + + i915_request_put(rq); + + return err; +} + diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h new file mode 100644 index 000000000000..6d6299543578 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. 
+ */ + +#ifndef __INTEL_PXP_CMD_H__ +#define __INTEL_PXP_CMD_H__ + +#include <linux/types.h> + +struct intel_pxp; + +int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 idx); + +#endif /* __INTEL_PXP_CMD_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c new file mode 100644 index 000000000000..10e1e45471f1 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <linux/debugfs.h> +#include <drm/drm_print.h> + +#include "gt/intel_gt_debugfs.h" +#include "pxp/intel_pxp.h" +#include "pxp/intel_pxp_irq.h" +#include "i915_drv.h" + +static int pxp_info_show(struct seq_file *m, void *data) +{ + struct intel_pxp *pxp = m->private; + struct drm_printer p = drm_seq_file_printer(m); + bool enabled = intel_pxp_is_enabled(pxp); + + if (!enabled) { + drm_printf(&p, "pxp disabled\n"); + return 0; + } + + drm_printf(&p, "active: %s\n", yesno(intel_pxp_is_active(pxp))); + drm_printf(&p, "instance counter: %u\n", pxp->key_instance); + + return 0; +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(pxp_info); + +static int pxp_terminate_get(void *data, u64 *val) +{ + /* nothing to read */ + return -EPERM; +} + +static int pxp_terminate_set(void *data, u64 val) +{ + struct intel_pxp *pxp = data; + struct intel_gt *gt = pxp_to_gt(pxp); + + if (!intel_pxp_is_active(pxp)) + return -ENODEV; + + /* simulate a termination interrupt */ + spin_lock_irq(&gt->irq_lock); + intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT); + spin_unlock_irq(&gt->irq_lock); + + if (!wait_for_completion_timeout(&pxp->termination, + msecs_to_jiffies(100))) + return -ETIMEDOUT; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n"); +void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *gt_root) +{ + static const struct intel_gt_debugfs_file files[] = { + { "info", &pxp_info_fops, NULL }, + { "terminate_state", &pxp_terminate_fops, NULL }, + }; + struct dentry *root; + + if (!gt_root) + return; + + if (!HAS_PXP((pxp_to_gt(pxp)->i915))) + return; + + root = debugfs_create_dir("pxp", gt_root); + if (IS_ERR(root)) + return; + + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), pxp); +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h new file mode 100644 index 000000000000..7e0c3d2f5d7e --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_PXP_DEBUGFS_H__ +#define __INTEL_PXP_DEBUGFS_H__ + +struct intel_pxp; +struct dentry; + +#ifdef CONFIG_DRM_I915_PXP +void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root); +#else +static inline void +intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root) +{ +} +#endif + +#endif /* __INTEL_PXP_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c new file mode 100644 index 000000000000..8d5553772ded --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2020 Intel Corporation. 
+ */ +#include <linux/workqueue.h> +#include "intel_pxp.h" +#include "intel_pxp_irq.h" +#include "intel_pxp_session.h" +#include "gt/intel_gt_irq.h" +#include "gt/intel_gt_types.h" +#include "i915_irq.h" +#include "i915_reg.h" +#include "intel_runtime_pm.h" + +/** + * intel_pxp_irq_handler - Handles PXP interrupts. + * @pxp: pointer to pxp struct + * @iir: interrupt vector + */ +void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + + if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp))) + return; + + lockdep_assert_held(&gt->irq_lock); + + if (unlikely(!iir)) + return; + + if (iir & (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | + GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) { + /* immediately mark PXP as inactive on termination */ + intel_pxp_mark_termination_in_progress(pxp); + pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED; + } + + if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT) + pxp->session_events |= PXP_TERMINATION_COMPLETE; + + if (pxp->session_events) + queue_work(system_unbound_wq, &pxp->session_work); +} + +static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts) +{ + struct intel_uncore *uncore = gt->uncore; + const u32 mask = interrupts << 16; + + intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, mask); + intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~mask); +} + +static inline void pxp_irq_reset(struct intel_gt *gt) +{ + spin_lock_irq(&gt->irq_lock); + gen11_gt_reset_one_iir(gt, 0, GEN11_KCR); + spin_unlock_irq(&gt->irq_lock); +} + +void intel_pxp_irq_enable(struct intel_pxp *pxp) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + + spin_lock_irq(&gt->irq_lock); + + if (!pxp->irq_enabled) + WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR)); + + __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS); + pxp->irq_enabled = true; + + spin_unlock_irq(&gt->irq_lock); +} + +void intel_pxp_irq_disable(struct intel_pxp *pxp) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + + /* + * We always need to submit a global termination when we re-enable the + * interrupts, so there is no need to make sure that the session state + * makes sense at the end of this function. Just make sure this is not + * called in a path where the driver considers the session as valid and + * doesn't call a termination on restart. + */ + GEM_WARN_ON(intel_pxp_is_active(pxp)); + + spin_lock_irq(&gt->irq_lock); + + pxp->irq_enabled = false; + __pxp_set_interrupts(gt, 0); + + spin_unlock_irq(&gt->irq_lock); + intel_synchronize_irq(gt->i915); + + pxp_irq_reset(gt); + + flush_work(&pxp->session_work); +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h new file mode 100644 index 000000000000..8b5793654844 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. 
+ */ + +#ifndef __INTEL_PXP_IRQ_H__ +#define __INTEL_PXP_IRQ_H__ + +#include <linux/types.h> + +struct intel_pxp; + +#define GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT BIT(1) +#define GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT BIT(2) +#define GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT BIT(3) + +#define GEN12_PXP_INTERRUPTS \ + (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | \ + GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT | \ + GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT) + +#ifdef CONFIG_DRM_I915_PXP +void intel_pxp_irq_enable(struct intel_pxp *pxp); +void intel_pxp_irq_disable(struct intel_pxp *pxp); +void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir); +#else +static inline void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir) +{ +} +#endif + +#endif /* __INTEL_PXP_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c new file mode 100644 index 000000000000..6a7d4e2ee138 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2020 Intel Corporation. + */ + +#include "intel_pxp.h" +#include "intel_pxp_irq.h" +#include "intel_pxp_pm.h" +#include "intel_pxp_session.h" +#include "i915_drv.h" + +void intel_pxp_suspend_prepare(struct intel_pxp *pxp) +{ + if (!intel_pxp_is_enabled(pxp)) + return; + + pxp->arb_is_valid = false; + + intel_pxp_invalidate(pxp); +} + +void intel_pxp_suspend(struct intel_pxp *pxp) +{ + intel_wakeref_t wakeref; + + if (!intel_pxp_is_enabled(pxp)) + return; + + with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) { + intel_pxp_fini_hw(pxp); + pxp->hw_state_invalidated = false; + } +} + +void intel_pxp_resume(struct intel_pxp *pxp) +{ + if (!intel_pxp_is_enabled(pxp)) + return; + + /* + * The PXP component gets automatically unbound when we go into S3 and + * re-bound after we come out, so in that scenario we can defer the + * hw init to the bind call. + */ + if (!pxp->pxp_component) + return; + + intel_pxp_init_hw(pxp); +} + +void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ + if (!intel_pxp_is_enabled(pxp)) + return; + + pxp->arb_is_valid = false; + + intel_pxp_fini_hw(pxp); + + pxp->hw_state_invalidated = false; +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h new file mode 100644 index 000000000000..16990a3f2f85 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. 
+ */ + +#ifndef __INTEL_PXP_PM_H__ +#define __INTEL_PXP_PM_H__ + +#include "intel_pxp_types.h" + +#ifdef CONFIG_DRM_I915_PXP +void intel_pxp_suspend_prepare(struct intel_pxp *pxp); +void intel_pxp_suspend(struct intel_pxp *pxp); +void intel_pxp_resume(struct intel_pxp *pxp); +void intel_pxp_runtime_suspend(struct intel_pxp *pxp); +#else +static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp) +{ +} + +static inline void intel_pxp_suspend(struct intel_pxp *pxp) +{ +} + +static inline void intel_pxp_resume(struct intel_pxp *pxp) +{ +} + +static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ +} +#endif +static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp) +{ + intel_pxp_resume(pxp); +} +#endif /* __INTEL_PXP_PM_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c new file mode 100644 index 000000000000..598840b73dfa --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. + */ + +#include <drm/i915_drm.h> + +#include "i915_drv.h" + +#include "intel_pxp.h" +#include "intel_pxp_cmd.h" +#include "intel_pxp_session.h" +#include "intel_pxp_tee.h" +#include "intel_pxp_types.h" + +#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */ + +#define GEN12_KCR_SIP _MMIO(0x32260) /* KCR hwdrm session in play 0-31 */ + +/* PXP global terminate register for session termination */ +#define PXP_GLOBAL_TERMINATE _MMIO(0x320f8) + +static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id) +{ + struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore; + intel_wakeref_t wakeref; + u32 sip = 0; + + /* if we're suspended the session is considered off */ + with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) + sip = intel_uncore_read(uncore, GEN12_KCR_SIP); + + return sip & BIT(id); +} + +static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play) +{ + struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore; + intel_wakeref_t wakeref; + u32 mask = BIT(id); + int ret; + + /* if we're suspended the session is considered off */ + wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm); + if (!wakeref) + return in_play ? -ENODEV : 0; + + ret = intel_wait_for_register(uncore, + GEN12_KCR_SIP, + mask, + in_play ? 
mask : 0, + 100); + + intel_runtime_pm_put(uncore->rpm, wakeref); + + return ret; +} + +static int pxp_create_arb_session(struct intel_pxp *pxp) +{ + struct intel_gt *gt = pxp_to_gt(pxp); + int ret; + + pxp->arb_is_valid = false; + + if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) { + drm_err(&gt->i915->drm, "arb session already in play at creation time\n"); + return -EEXIST; + } + + ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION); + if (ret) { + drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n"); + return ret; + } + + ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true); + if (ret) { + drm_err(&gt->i915->drm, "arb session failed to go in play\n"); + return ret; + } + + if (!++pxp->key_instance) + ++pxp->key_instance; + + pxp->arb_is_valid = true; + + return 0; +} + +static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp) +{ + int ret; + struct intel_gt *gt = pxp_to_gt(pxp); + + /* must mark termination in progress before calling this function */ + GEM_WARN_ON(pxp->arb_is_valid); + + /* terminate the hw sessions */ + ret = intel_pxp_terminate_session(pxp, ARB_SESSION); + if (ret) { + drm_err(&gt->i915->drm, "Failed to submit session termination\n"); + return ret; + } + + ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false); + if (ret) { + drm_err(&gt->i915->drm, "Session state did not clear\n"); + return ret; + } + + intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1); + + return ret; +} + +static void pxp_terminate(struct intel_pxp *pxp) +{ + int ret; + + pxp->hw_state_invalidated = true; + + /* + * if we fail to submit the termination there is no point in waiting for + * it to complete. PXP will be marked as non-active until the next + * termination is issued. + */ + ret = pxp_terminate_arb_session_and_global(pxp); + if (ret) + complete_all(&pxp->termination); +} + +static void pxp_terminate_complete(struct intel_pxp *pxp) +{ + /* Re-create the arb session after the teardown handling completes */ + if (fetch_and_zero(&pxp->hw_state_invalidated)) + pxp_create_arb_session(pxp); + + complete_all(&pxp->termination); +} + +void intel_pxp_session_work(struct work_struct *work) +{ + struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work); + struct intel_gt *gt = pxp_to_gt(pxp); + intel_wakeref_t wakeref; + u32 events = 0; + + spin_lock_irq(&gt->irq_lock); + events = fetch_and_zero(&pxp->session_events); + spin_unlock_irq(&gt->irq_lock); + + if (!events) + return; + + if (events & PXP_INVAL_REQUIRED) + intel_pxp_invalidate(pxp); + + /* + * If we're processing an event while suspending then don't bother, + * we're going to re-init everything on resume anyway. + */ + wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm); + if (!wakeref) + return; + + if (events & PXP_TERMINATION_REQUEST) { + events &= ~PXP_TERMINATION_COMPLETE; + pxp_terminate(pxp); + } + + if (events & PXP_TERMINATION_COMPLETE) + pxp_terminate_complete(pxp); + + intel_runtime_pm_put(gt->uncore->rpm, wakeref); +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h new file mode 100644 index 000000000000..ba4c9d2b94b7 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. 
+ */ + +#ifndef __INTEL_PXP_SESSION_H__ +#define __INTEL_PXP_SESSION_H__ + +#include <linux/types.h> + +struct work_struct; + +void intel_pxp_session_work(struct work_struct *work); + +#endif /* __INTEL_PXP_SESSION_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c new file mode 100644 index 000000000000..5d169624ad60 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2020 Intel Corporation. + */ + +#include <linux/component.h> + +#include <drm/i915_pxp_tee_interface.h> +#include <drm/i915_component.h> + +#include "i915_drv.h" +#include "intel_pxp.h" +#include "intel_pxp_session.h" +#include "intel_pxp_tee.h" +#include "intel_pxp_tee_interface.h" + +static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev) +{ + return &kdev_to_i915(i915_kdev)->gt.pxp; +} + +static int intel_pxp_tee_io_message(struct intel_pxp *pxp, + void *msg_in, u32 msg_in_size, + void *msg_out, u32 msg_out_max_size, + u32 *msg_out_rcv_size) +{ + struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915; + struct i915_pxp_component *pxp_component = pxp->pxp_component; + int ret = 0; + + mutex_lock(&pxp->tee_mutex); + + /* + * The binding of the component is asynchronous from i915 probe, so we + * can't be sure it has happened. + */ + if (!pxp_component) { + ret = -ENODEV; + goto unlock; + } + + ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size); + if (ret) { + drm_err(&i915->drm, "Failed to send PXP TEE message\n"); + goto unlock; + } + + ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size); + if (ret < 0) { + drm_err(&i915->drm, "Failed to receive PXP TEE message\n"); + goto unlock; + } + + if (ret > msg_out_max_size) { + drm_err(&i915->drm, + "Failed to receive PXP TEE message due to unexpected output size\n"); + ret = -ENOSPC; + goto unlock; + } + + if (msg_out_rcv_size) + *msg_out_rcv_size = ret; + + ret = 0; +unlock: + mutex_unlock(&pxp->tee_mutex); + return ret; +} + +/** + * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee + * @i915_kdev: pointer to i915 kernel device + * @tee_kdev: pointer to tee kernel device + * @data: pointer to pxp_tee_master containing the function pointers + * + * This bind function is called during the system boot or resume from system sleep. + * + * Return: return 0 if successful. 
+ */ +static int i915_pxp_tee_component_bind(struct device *i915_kdev, + struct device *tee_kdev, void *data) +{ + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); + struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev); + intel_wakeref_t wakeref; + + mutex_lock(&pxp->tee_mutex); + pxp->pxp_component = data; + pxp->pxp_component->tee_dev = tee_kdev; + mutex_unlock(&pxp->tee_mutex); + + /* if we are suspended, the HW will be re-initialized on resume */ + wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); + if (!wakeref) + return 0; + + /* the component is required to fully start the PXP HW */ + intel_pxp_init_hw(pxp); + + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + return 0; +} + +static void i915_pxp_tee_component_unbind(struct device *i915_kdev, + struct device *tee_kdev, void *data) +{ + struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev); + + intel_pxp_fini_hw(pxp); + + mutex_lock(&pxp->tee_mutex); + pxp->pxp_component = NULL; + mutex_unlock(&pxp->tee_mutex); +} + +static const struct component_ops i915_pxp_tee_component_ops = { + .bind = i915_pxp_tee_component_bind, + .unbind = i915_pxp_tee_component_unbind, +}; + +int intel_pxp_tee_component_init(struct intel_pxp *pxp) +{ + int ret; + struct intel_gt *gt = pxp_to_gt(pxp); + struct drm_i915_private *i915 = gt->i915; + + ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops, + I915_COMPONENT_PXP); + if (ret < 0) { + drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret); + return ret; + } + + pxp->pxp_component_added = true; + + return 0; +} + +void intel_pxp_tee_component_fini(struct intel_pxp *pxp) +{ + struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915; + + if (!pxp->pxp_component_added) + return; + + component_del(i915->drm.dev, &i915_pxp_tee_component_ops); + pxp->pxp_component_added = false; +} + +int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp, + int arb_session_id) +{ + struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915; + struct pxp_tee_create_arb_in msg_in = {0}; + struct pxp_tee_create_arb_out msg_out = {0}; + int ret; + + msg_in.header.api_version = PXP_TEE_APIVER; + msg_in.header.command_id = PXP_TEE_ARB_CMDID; + msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); + msg_in.protection_mode = PXP_TEE_ARB_PROTECTION_MODE; + msg_in.session_id = arb_session_id; + + ret = intel_pxp_tee_io_message(pxp, + &msg_in, sizeof(msg_in), + &msg_out, sizeof(msg_out), + NULL); + + if (ret) + drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret); + + return ret; +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h new file mode 100644 index 000000000000..c136053ce340 --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. + */ + +#ifndef __INTEL_PXP_TEE_H__ +#define __INTEL_PXP_TEE_H__ + +#include "intel_pxp.h" + +int intel_pxp_tee_component_init(struct intel_pxp *pxp); +void intel_pxp_tee_component_fini(struct intel_pxp *pxp); + +int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp, + int arb_session_id); + +#endif /* __INTEL_PXP_TEE_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h new file mode 100644 index 000000000000..36e9b0868f5c --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. 
All rights reserved. + */ + +#ifndef __INTEL_PXP_TEE_INTERFACE_H__ +#define __INTEL_PXP_TEE_INTERFACE_H__ + +#include <linux/types.h> + +#define PXP_TEE_APIVER 0x40002 +#define PXP_TEE_ARB_CMDID 0x1e +#define PXP_TEE_ARB_PROTECTION_MODE 0x2 + +/* PXP TEE message header */ +struct pxp_tee_cmd_header { + u32 api_version; + u32 command_id; + u32 status; + /* Length of the message (excluding the header) */ + u32 buffer_len; +} __packed; + +/* PXP TEE message input to create an arb session */ +struct pxp_tee_create_arb_in { + struct pxp_tee_cmd_header header; + u32 protection_mode; + u32 session_id; +} __packed; + +/* PXP TEE message output to create an arb session */ +struct pxp_tee_create_arb_out { + struct pxp_tee_cmd_header header; +} __packed; + +#endif /* __INTEL_PXP_TEE_INTERFACE_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h new file mode 100644 index 000000000000..7ce5f37ee12e --- /dev/null +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2020, Intel Corporation. All rights reserved. + */ + +#ifndef __INTEL_PXP_TYPES_H__ +#define __INTEL_PXP_TYPES_H__ + +#include <linux/completion.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct intel_context; +struct i915_pxp_component; + +/** + * struct intel_pxp - pxp state + */ +struct intel_pxp { + /** + * @pxp_component: i915_pxp_component struct of the bound mei_pxp + * module. Only set and cleared inside component bind/unbind functions, + * which are protected by &tee_mutex. + */ + struct i915_pxp_component *pxp_component; + /** + * @pxp_component_added: track if the pxp component has been added. + * Set and cleared in tee init and fini functions respectively. + */ + bool pxp_component_added; + + /** @ce: kernel-owned context used for PXP operations */ + struct intel_context *ce; + + /** @arb_mutex: protects arb session start */ + struct mutex arb_mutex; + /** + * @arb_is_valid: tracks arb session status. + * After a teardown, the arb session can still be in play on the HW + * even if the keys are gone, so we can't rely on the HW state of the + * session to know if it's valid, and we need to track the status in SW. + */ + bool arb_is_valid; + + /** + * @key_instance: tracks which key instance we're on, so we can use it + * to determine if an object was created using the current key or a + * previous one. + */ + u32 key_instance; + + /** @tee_mutex: protects the tee channel binding and messaging. */ + struct mutex tee_mutex; + + /** + * @hw_state_invalidated: if the HW perceives an attack on the integrity + * of the encryption it will invalidate the keys and expect SW to + * re-initialize the session. We keep track of this state to make sure + * we only re-start the arb session when required. + */ + bool hw_state_invalidated; + + /** @irq_enabled: tracks the status of the kcr irqs */ + bool irq_enabled; + /** + * @termination: tracks the status of a pending termination. Only + * re-initialized under gt->irq_lock and completed in &session_work. + */ + struct completion termination; + + /** @session_work: worker that manages session events. */ + struct work_struct session_work; + /** @session_events: pending session events, protected with gt->irq_lock.
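+ * The pending events are tracked with the PXP_TERMINATION_REQUEST,
+ * PXP_TERMINATION_COMPLETE and PXP_INVAL_REQUIRED bits defined below.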
*/ + u32 session_events; +#define PXP_TERMINATION_REQUEST BIT(0) +#define PXP_TERMINATION_COMPLETE BIT(1) +#define PXP_INVAL_REQUIRED BIT(2) +}; + +#endif /* __INTEL_PXP_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index f99bb0113726..7e0658a77659 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -442,6 +442,7 @@ static int igt_evict_contexts(void *arg) /* Overfill the GGTT with context objects and so try to evict one. */ for_each_engine(engine, gt, id) { struct i915_sw_fence fence; + struct i915_request *last = NULL; count = 0; onstack_fence_init(&fence); @@ -479,6 +480,9 @@ static int igt_evict_contexts(void *arg) i915_request_add(rq); count++; + if (last) + i915_request_put(last); + last = i915_request_get(rq); err = 0; } while(1); onstack_fence_fini(&fence); @@ -486,6 +490,21 @@ static int igt_evict_contexts(void *arg) count, engine->name); if (err) break; + if (last) { + if (i915_request_wait(last, 0, HZ) < 0) { + err = -EIO; + i915_request_put(last); + pr_err("Failed waiting for last request (on %s)", + engine->name); + break; + } + i915_request_put(last); + } + err = intel_gt_wait_for_idle(engine->gt, HZ * 3); + if (err) { + pr_err("Failed to idle GT (on %s)", engine->name); + break; + } } mutex_lock(&ggtt->vm.mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index f843a5040706..46f4236039a9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -155,7 +155,7 @@ static int igt_ppgtt_alloc(void *arg) if (!HAS_PPGTT(dev_priv)) return 0; - ppgtt = i915_ppgtt_create(&dev_priv->gt); + ppgtt = i915_ppgtt_create(&dev_priv->gt, 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -1053,7 +1053,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, if (IS_ERR(file)) return PTR_ERR(file); - ppgtt = i915_ppgtt_create(&dev_priv->gt); + ppgtt = i915_ppgtt_create(&dev_priv->gt, 0); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_free; @@ -1300,7 +1300,7 @@ static int exercise_mock(struct drm_i915_private *i915, if (!ctx) return -ENOMEM; - vm = i915_gem_context_get_vm_rcu(ctx); + vm = i915_gem_context_get_eb_vm(ctx); err = func(vm, 0, min(vm->total, limit), end_time); i915_vm_put(vm); @@ -1848,7 +1848,7 @@ static int igt_cs_tlb(void *arg) goto out_unlock; } - vm = i915_gem_context_get_vm_rcu(ctx); + vm = i915_gem_context_get_eb_vm(ctx); if (i915_is_ggtt(vm)) goto out_vm; diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index cfa5c4165a4f..bdd290f2bf3c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -47,5 +47,7 @@ selftest(execlists, intel_execlists_live_selftests) selftest(ring_submission, intel_ring_submission_live_selftests) selftest(perf, i915_perf_live_selftests) selftest(slpc, intel_slpc_live_selftests) +selftest(guc, intel_guc_live_selftests) +selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests) /* Here be dragons: keep last to run last! 
*/ selftest(late_gt_pm, intel_gt_pm_late_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index d67710d10615..9979ef9197cd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -209,6 +209,10 @@ static int igt_request_rewind(void *arg) int err = -EINVAL; ctx[0] = mock_context(i915, "A"); + if (!ctx[0]) { + err = -ENOMEM; + goto err_ctx_0; + } ce = i915_gem_context_get_engine(ctx[0], RCS0); GEM_BUG_ON(IS_ERR(ce)); @@ -223,6 +227,10 @@ static int igt_request_rewind(void *arg) i915_request_add(request); ctx[1] = mock_context(i915, "B"); + if (!ctx[1]) { + err = -ENOMEM; + goto err_ctx_1; + } ce = i915_gem_context_get_engine(ctx[1], RCS0); GEM_BUG_ON(IS_ERR(ce)); @@ -261,9 +269,11 @@ err: i915_request_put(vip); err_context_1: mock_context_close(ctx[1]); +err_ctx_1: i915_request_put(request); err_context_0: mock_context_close(ctx[0]); +err_ctx_0: mock_device_flush(i915); return err; } @@ -2805,7 +2815,7 @@ static int p_sync0(void *arg) i915_request_add(rq); err = 0; - if (i915_request_wait(rq, 0, HZ / 5) < 0) + if (i915_request_wait(rq, 0, HZ) < 0) err = -ETIME; i915_request_put(rq); if (err) @@ -2876,7 +2886,7 @@ static int p_sync1(void *arg) i915_request_add(rq); err = 0; - if (prev && i915_request_wait(prev, 0, HZ / 5) < 0) + if (prev && i915_request_wait(prev, 0, HZ) < 0) err = -ETIME; i915_request_put(prev); prev = rq; diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c index cbf45d85cbff..daa985e5a19b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c @@ -28,7 +28,7 @@ #include "../i915_selftest.h" -static int __i915_sw_fence_call +static int fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { switch (state) { diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index dd0607254a95..1f10fe36619b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -39,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma, { bool ok = true; - if (vma->vm != rcu_access_pointer(ctx->vm)) { + if (vma->vm != ctx->vm) { pr_err("VMA created with wrong VM\n"); ok = false; } @@ -118,7 +118,7 @@ static int create_vmas(struct drm_i915_private *i915, struct i915_vma *vma; int err; - vm = i915_gem_context_get_vm_rcu(ctx); + vm = i915_gem_context_get_eb_vm(ctx); vma = checked_vma_instance(obj, vm, NULL); i915_vm_put(vm); if (IS_ERR(vma)) diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c index 9f8590b868a9..a2838c65f8a5 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.c +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -36,7 +36,7 @@ void igt_global_reset_unlock(struct intel_gt *gt) enum intel_engine_id id; for_each_engine(engine, gt, id) - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); clear_bit(I915_RESET_BACKOFF, >->reset.flags); wake_up_all(>->reset.queue); diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 418caae84759..0d5df0dc7212 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -225,7 +225,7 @@ static int igt_mock_reserve(void *arg) out_close: close_objects(mem, 
&objects); - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); out_free_order: kfree(order); return err; @@ -439,7 +439,7 @@ static int igt_mock_splintered_region(void *arg) out_close: close_objects(mem, &objects); out_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -507,7 +507,7 @@ static int igt_mock_max_segment(void *arg) out_close: close_objects(mem, &objects); out_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -1196,7 +1196,7 @@ int intel_memory_region_mock_selftests(void) err = i915_subtests(tests, mem); - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); out_unref: mock_destroy_device(i915); return err; diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c index 4b328346b48a..310fb83c527e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c +++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c @@ -14,6 +14,18 @@ #define REDUCED_PREEMPT 10 #define WAIT_FOR_RESET_TIME 10000 +struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) + return engine; + + pr_err("No valid engine found!\n"); + return NULL; +} + int intel_selftest_modify_policy(struct intel_engine_cs *engine, struct intel_selftest_saved_policy *saved, u32 modify_type) diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h index 35c098601ac0..ae60bb507f45 100644 --- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h +++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h @@ -10,6 +10,7 @@ struct i915_request; struct intel_engine_cs; +struct intel_gt; struct intel_selftest_saved_policy { u32 flags; @@ -23,6 +24,7 @@ enum selftest_scheduler_modify { SELFTEST_SCHEDULER_MODIFY_FAST_RESET, }; +struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt); int intel_selftest_modify_policy(struct intel_engine_cs *engine, struct intel_selftest_saved_policy *saved, enum selftest_scheduler_modify modify_type); diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 720b60853f8b..bc8128170a99 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -62,30 +62,40 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges, static int intel_shadow_table_check(void) { struct { - const i915_reg_t *regs; + const struct i915_range *regs; unsigned int size; - } reg_lists[] = { + } range_lists[] = { { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) }, { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) }, { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) }, - { xehp_shadowed_regs, ARRAY_SIZE(xehp_shadowed_regs) }, + { dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) }, }; - const i915_reg_t *reg; + const struct i915_range *range; unsigned int i, j; s32 prev; - for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) { - reg = reg_lists[j].regs; - for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) { - u32 offset = i915_mmio_reg_offset(*reg); + for (j = 0; j < ARRAY_SIZE(range_lists); ++j) { + range = range_lists[j].regs; + for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) { + if (range->end < range->start) { + pr_err("%s: range[%d]:(%06x-%06x) has end before 
start\n", + __func__, i, range->start, range->end); + return -EINVAL; + } + + if (prev >= (s32)range->start) { + pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n", + __func__, i, range->start, range->end, prev); + return -EINVAL; + } - if (prev >= (s32)offset) { - pr_err("%s: entry[%d]:(%x) is before previous (%x)\n", - __func__, i, offset, prev); + if (range->start % 4) { + pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n", + __func__, i, range->start, range->end); return -EINVAL; } - prev = offset; + prev = range->end; } } diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c index 080b90b63d16..bf2752cc1e0b 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c @@ -26,7 +26,7 @@ /* Small library of different fence types useful for writing tests */ -static int __i915_sw_fence_call +static int nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { return NOTIFY_DONE; @@ -41,12 +41,12 @@ void __onstack_fence_init(struct i915_sw_fence *fence, __init_waitqueue_head(&fence->wait, name, key); atomic_set(&fence->pending, 1); fence->error = 0; - fence->flags = (unsigned long)nop_fence_notify; + fence->fn = nop_fence_notify; } void onstack_fence_fini(struct i915_sw_fence *fence) { - if (!fence->flags) + if (!fence->fn) return; i915_sw_fence_commit(fence); @@ -89,7 +89,7 @@ struct heap_fence { }; }; -static int __i915_sw_fence_call +static int heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct heap_fence *h = container_of(fence, typeof(*h), fence); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 4f8180146888..d0e2e61de8d4 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -165,7 +165,7 @@ struct drm_i915_private *mock_gem_device(void) /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); - mkwrite_device_info(i915)->graphics_ver = -1; + mkwrite_device_info(i915)->graphics.ver = -1; mkwrite_device_info(i915)->page_sizes = I915_GTT_PAGE_SIZE_4K | @@ -177,6 +177,8 @@ struct drm_i915_private *mock_gem_device(void) mock_uncore_init(&i915->uncore, i915); + spin_lock_init(&i915->gpu_error.lock); + i915_gem_init__mm(i915); intel_gt_init_early(&i915->gt, i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index efa86dffe3c6..19bff8afcaaa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -6,8 +6,6 @@ #include <drm/ttm/ttm_placement.h> #include <linux/scatterlist.h> -#include <drm/ttm/ttm_placement.h> - #include "gem/i915_gem_region.h" #include "intel_memory_region.h" #include "intel_region_ttm.h" @@ -17,9 +15,9 @@ static void mock_region_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { + i915_refct_sgt_put(obj->mm.rsgt); + obj->mm.rsgt = NULL; intel_region_ttm_resource_free(obj->mm.region, obj->mm.res); - sg_free_table(pages); - kfree(pages); } static int mock_region_get_pages(struct drm_i915_gem_object *obj) @@ -38,12 +36,14 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj) if (IS_ERR(obj->mm.res)) return PTR_ERR(obj->mm.res); - pages = intel_region_ttm_resource_to_st(obj->mm.region, 
obj->mm.res); - if (IS_ERR(pages)) { - err = PTR_ERR(pages); + obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, + obj->mm.res); + if (IS_ERR(obj->mm.rsgt)) { + err = PTR_ERR(obj->mm.rsgt); goto err_free_resource; } + pages = &obj->mm.rsgt->table; __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl)); return 0; @@ -84,13 +84,16 @@ static int mock_object_init(struct intel_memory_region *mem, return 0; } -static void mock_region_fini(struct intel_memory_region *mem) +static int mock_region_fini(struct intel_memory_region *mem) { struct drm_i915_private *i915 = mem->i915; int instance = mem->instance; + int ret; - intel_region_ttm_fini(mem); + ret = intel_region_ttm_fini(mem); ida_free(&i915->selftest.mock_region_instances, instance); + + return ret; } static const struct intel_memory_region_ops mock_region_ops = { diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c new file mode 100644 index 000000000000..ed2ac5752ac4 --- /dev/null +++ b/drivers/gpu/drm/i915/vlv_sideband.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2013-2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_iosf_mbi.h" +#include "vlv_sideband.h" + +/* + * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and + * VLV_VLV2_PUNIT_HAS_0.8.docx + */ + +/* Standard MMIO read, non-posted */ +#define SB_MRD_NP 0x00 +/* Standard MMIO write, non-posted */ +#define SB_MWR_NP 0x01 +/* Private register read, double-word addressing, non-posted */ +#define SB_CRRDDA_NP 0x06 +/* Private register write, double-word addressing, non-posted */ +#define SB_CRWRDA_NP 0x07 + +static void ping(void *info) +{ +} + +static void __vlv_punit_get(struct drm_i915_private *i915) +{ + iosf_mbi_punit_acquire(); + + /* + * Prevent the cpu from sleeping while we use this sideband, otherwise + * the punit may cause a machine hang. The issue appears to be isolated + * with changing the power state of the CPU package while changing + * the power state via the punit, and we have only observed it + * reliably on 4-core Baytail systems suggesting the issue is in the + * power delivery mechanism and likely to be board/function + * specific. Hence we presume the workaround needs only be applied + * to the Valleyview P-unit and not all sideband communications. + */ + if (IS_VALLEYVIEW(i915)) { + cpu_latency_qos_update_request(&i915->sb_qos, 0); + on_each_cpu(ping, NULL, 1); + } +} + +static void __vlv_punit_put(struct drm_i915_private *i915) +{ + if (IS_VALLEYVIEW(i915)) + cpu_latency_qos_update_request(&i915->sb_qos, + PM_QOS_DEFAULT_VALUE); + + iosf_mbi_punit_release(); +} + +void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports) +{ + if (ports & BIT(VLV_IOSF_SB_PUNIT)) + __vlv_punit_get(i915); + + mutex_lock(&i915->sb_lock); +} + +void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports) +{ + mutex_unlock(&i915->sb_lock); + + if (ports & BIT(VLV_IOSF_SB_PUNIT)) + __vlv_punit_put(i915); +} + +static int vlv_sideband_rw(struct drm_i915_private *i915, + u32 devfn, u32 port, u32 opcode, + u32 addr, u32 *val) +{ + struct intel_uncore *uncore = &i915->uncore; + const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP); + int err; + + lockdep_assert_held(&i915->sb_lock); + if (port == IOSF_PORT_PUNIT) + iosf_mbi_assert_punit_acquired(); + + /* Flush the previous comms, just in case it failed last time. 
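+ * A transaction programs VLV_IOSF_ADDR and (for writes) VLV_IOSF_DATA,
+ * then kicks VLV_IOSF_DOORBELL_REQ with IOSF_SB_BUSY set; the hardware
+ * clears the busy bit when the transaction completes, and reads return
+ * their result in VLV_IOSF_DATA.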
*/ + if (intel_wait_for_register(uncore, + VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, + 5)) { + drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n", + is_read ? "read" : "write"); + return -EAGAIN; + } + + preempt_disable(); + + intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr); + intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val); + intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ, + (devfn << IOSF_DEVFN_SHIFT) | + (opcode << IOSF_OPCODE_SHIFT) | + (port << IOSF_PORT_SHIFT) | + (0xf << IOSF_BYTE_ENABLES_SHIFT) | + (0 << IOSF_BAR_SHIFT) | + IOSF_SB_BUSY); + + if (__intel_wait_for_register_fw(uncore, + VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, + 10000, 0, NULL) == 0) { + if (is_read) + *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA); + err = 0; + } else { + drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n", + is_read ? "read" : "write"); + err = -ETIMEDOUT; + } + + preempt_enable(); + + return err; +} + +u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr) +{ + u32 val = 0; + + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, + SB_CRRDDA_NP, addr, &val); + + return val; +} + +int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val) +{ + return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, + SB_CRWRDA_NP, addr, &val); +} + +u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg) +{ + u32 val = 0; + + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, + SB_CRRDDA_NP, reg, &val); + + return val; +} + +void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val) +{ + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT, + SB_CRWRDA_NP, reg, &val); +} + +u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr) +{ + u32 val = 0; + + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC, + SB_CRRDDA_NP, addr, &val); + + return val; +} + +u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg) +{ + u32 val = 0; + + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, + SB_CRRDDA_NP, reg, &val); + + return val; +} + +void vlv_iosf_sb_write(struct drm_i915_private *i915, + u8 port, u32 reg, u32 val) +{ + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, + SB_CRWRDA_NP, reg, &val); +} + +u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg) +{ + u32 val = 0; + + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK, + SB_CRRDDA_NP, reg, &val); + + return val; +} + +void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val) +{ + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK, + SB_CRWRDA_NP, reg, &val); +} + +u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg) +{ + u32 val = 0; + + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU, + SB_CRRDDA_NP, reg, &val); + + return val; +} + +void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val) +{ + vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU, + SB_CRWRDA_NP, reg, &val); +} + +static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy) +{ + /* + * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D) + * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C) + */ + if (IS_CHERRYVIEW(i915)) + return phy == DPIO_PHY0 ? 
IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO; + else + return IOSF_PORT_DPIO; +} + +u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) +{ + u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); + u32 val = 0; + + vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val); + + /* + * FIXME: There might be some registers where all 1's is a valid value, + * so ideally we should check the register offset instead... + */ + drm_WARN(&i915->drm, val == 0xffffffff, + "DPIO read pipe %c reg 0x%x == 0x%x\n", + pipe_name(pipe), reg, val); + + return val; +} + +void vlv_dpio_write(struct drm_i915_private *i915, + enum pipe pipe, int reg, u32 val) +{ + u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); + + vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val); +} + +u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg) +{ + u32 val = 0; + + vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP, + reg, &val); + return val; +} + +void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val) +{ + vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP, + reg, &val); +} diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h index d1d14bcb8f56..d7732f612e7f 100644 --- a/drivers/gpu/drm/i915/intel_sideband.h +++ b/drivers/gpu/drm/i915/vlv_sideband.h @@ -1,18 +1,16 @@ /* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2013-2021 Intel Corporation + */ -#ifndef _INTEL_SIDEBAND_H_ -#define _INTEL_SIDEBAND_H_ +#ifndef _VLV_SIDEBAND_H_ +#define _VLV_SIDEBAND_H_ #include <linux/bitops.h> #include <linux/types.h> -struct drm_i915_private; enum pipe; - -enum intel_sbi_destination { - SBI_ICLK, - SBI_MPHY, -}; +struct drm_i915_private; enum { VLV_IOSF_SB_BUNIT, @@ -122,22 +120,4 @@ static inline void vlv_punit_put(struct drm_i915_private *i915) vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT)); } -u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg, - enum intel_sbi_destination destination); -void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value, - enum intel_sbi_destination destination); - -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, - u32 *val, u32 *val1); -int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, - u32 val, int fast_timeout_us, - int slow_timeout_ms); -#define sandybridge_pcode_write(i915, mbox, val) \ - sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0) - -int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, - u32 reply_mask, u32 reply, int timeout_base_ms); - -int intel_pcode_init(struct drm_i915_private *i915); - -#endif /* _INTEL_SIDEBAND_H */ +#endif /* _VLV_SIDEBAND_H_ */ diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index b5fa0e45a839..bb9738c7c825 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -4,7 +4,7 @@ config DRM_IMX select DRM_KMS_HELPER select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER + select DRM_KMS_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST) depends on IMX_IPUV3_CORE help diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig index 2b17a964ff05..7374f1952762 100644 --- a/drivers/gpu/drm/imx/dcss/Kconfig +++ b/drivers/gpu/drm/imx/dcss/Kconfig @@ -1,7 +1,7 @@ config DRM_IMX_DCSS tristate "i.MX8MQ DCSS" select IMX_IRQSTEER - select DRM_KMS_CMA_HELPER + select DRM_KMS_HELPER select VIDEOMODE_HELPERS depends on DRM && ARCH_MXC && ARM64 help diff --git 
a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig index 3b57f8be007c..001f59fb06d5 100644 --- a/drivers/gpu/drm/ingenic/Kconfig +++ b/drivers/gpu/drm/ingenic/Kconfig @@ -8,7 +8,6 @@ config DRM_INGENIC select DRM_BRIDGE select DRM_PANEL_BRIDGE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE help diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index a5df1c8d34cd..b4943a56be09 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -21,6 +21,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -41,6 +42,8 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> +#define HWDESC_PALETTE 2 + struct ingenic_dma_hwdesc { u32 next; u32 addr; @@ -49,9 +52,7 @@ struct ingenic_dma_hwdesc { } __aligned(16); struct ingenic_dma_hwdescs { - struct ingenic_dma_hwdesc hwdesc_f0; - struct ingenic_dma_hwdesc hwdesc_f1; - struct ingenic_dma_hwdesc hwdesc_pal; + struct ingenic_dma_hwdesc hwdesc[3]; u16 palette[256] __aligned(16); }; @@ -64,6 +65,11 @@ struct jz_soc_info { unsigned int num_formats_f0, num_formats_f1; }; +struct ingenic_drm_private_state { + struct drm_private_state base; + bool use_palette; +}; + struct ingenic_drm { struct drm_device drm; /* @@ -99,8 +105,53 @@ struct ingenic_drm { struct mutex clk_mutex; bool update_clk_rate; struct notifier_block clock_nb; + + struct drm_private_obj private_obj; }; +struct ingenic_drm_bridge { + struct drm_encoder encoder; + struct drm_bridge bridge, *next_bridge; + + struct drm_bus_cfg bus_cfg; +}; + +static inline struct ingenic_drm_bridge * +to_ingenic_drm_bridge(struct drm_encoder *encoder) +{ + return container_of(encoder, struct ingenic_drm_bridge, encoder); +} + +static inline struct ingenic_drm_private_state * +to_ingenic_drm_priv_state(struct drm_private_state *state) +{ + return container_of(state, struct ingenic_drm_private_state, base); +} + +static struct ingenic_drm_private_state * +ingenic_drm_get_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_ingenic_drm_priv_state(priv_state); +} + +static struct ingenic_drm_private_state * +ingenic_drm_get_new_priv_state(struct ingenic_drm *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj); + if (!priv_state) + return NULL; + + return to_ingenic_drm_priv_state(priv_state); +} + static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -141,6 +192,14 @@ static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb) return container_of(nb, struct ingenic_drm, clock_nb); } +static inline dma_addr_t dma_hwdesc_addr(const struct ingenic_drm *priv, + unsigned int idx) +{ + u32 offset = offsetof(struct ingenic_dma_hwdescs, hwdesc[idx]); + + return priv->dma_hwdescs_phys + offset; +} + static int ingenic_drm_update_pixclk(struct notifier_block *nb, unsigned long action, void *data) @@ -163,9 +222,20 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc, 
struct drm_atomic_state *state) { struct ingenic_drm *priv = drm_crtc_get_priv(crtc); + struct ingenic_drm_private_state *priv_state; + unsigned int next_id; + + priv_state = ingenic_drm_get_priv_state(priv, state); + if (WARN_ON(IS_ERR(priv_state))) + return; regmap_write(priv->map, JZ_REG_LCD_STATE, 0); + /* Set addresses of our DMA descriptor chains */ + next_id = priv_state->use_palette ? HWDESC_PALETTE : 0; + regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_addr(priv, next_id)); + regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_addr(priv, 1)); + regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_ENABLE | JZ_LCD_CTRL_DISABLE, JZ_LCD_CTRL_ENABLE); @@ -369,6 +439,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); struct ingenic_drm *priv = drm_device_get_priv(plane->dev); + struct ingenic_drm_private_state *priv_state; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; int ret; @@ -381,6 +452,10 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, if (WARN_ON(!crtc_state)) return -EINVAL; + priv_state = ingenic_drm_get_priv_state(priv, state); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, @@ -399,6 +474,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane, (new_plane_state->src_h >> 16) != new_plane_state->crtc_h)) return -EINVAL; + priv_state->use_palette = new_plane_state->fb && + new_plane_state->fb->format->format == DRM_FORMAT_C8; + /* * Require full modeset if enabling or disabling a plane, or changing * its position, size or depth. @@ -558,9 +636,10 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, struct ingenic_drm *priv = drm_device_get_priv(plane->dev); struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state, plane); struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state, plane); + unsigned int width, height, cpp, next_id, plane_id; + struct ingenic_drm_private_state *priv_state; struct drm_crtc_state *crtc_state; struct ingenic_dma_hwdesc *hwdesc; - unsigned int width, height, cpp, offset; dma_addr_t addr; u32 fourcc; @@ -569,32 +648,26 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane, drm_fb_cma_sync_non_coherent(&priv->drm, oldstate, newstate); crtc_state = newstate->crtc->state; + plane_id = !!(priv->soc_info->has_osd && plane != &priv->f0); addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0); width = newstate->src_w >> 16; height = newstate->src_h >> 16; cpp = newstate->fb->format->cpp[0]; - if (!priv->soc_info->has_osd || plane == &priv->f0) - hwdesc = &priv->dma_hwdescs->hwdesc_f0; - else - hwdesc = &priv->dma_hwdescs->hwdesc_f1; + priv_state = ingenic_drm_get_new_priv_state(priv, state); + next_id = (priv_state && priv_state->use_palette) ? 
HWDESC_PALETTE : plane_id; + hwdesc = &priv->dma_hwdescs->hwdesc[plane_id]; hwdesc->addr = addr; hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4); + hwdesc->next = dma_hwdesc_addr(priv, next_id); if (drm_atomic_crtc_needs_modeset(crtc_state)) { fourcc = newstate->fb->format->format; ingenic_drm_plane_config(priv->dev, plane, fourcc); - if (fourcc == DRM_FORMAT_C8) - offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_pal); - else - offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); - - priv->dma_hwdescs->hwdesc_f0.next = priv->dma_hwdescs_phys + offset; - crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8; } @@ -609,11 +682,10 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, { struct ingenic_drm *priv = drm_device_get_priv(encoder->dev); struct drm_display_mode *mode = &crtc_state->adjusted_mode; - struct drm_connector *conn = conn_state->connector; - struct drm_display_info *info = &conn->display_info; + struct ingenic_drm_bridge *bridge = to_ingenic_drm_bridge(encoder); unsigned int cfg, rgbcfg = 0; - priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS; + priv->panel_is_sharp = bridge->bus_cfg.flags & DRM_BUS_FLAG_SHARP_SIGNALS; if (priv->panel_is_sharp) { cfg = JZ_LCD_CFG_MODE_SPECIAL_TFT_1 | JZ_LCD_CFG_REV_POLARITY; @@ -626,19 +698,19 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW; if (mode->flags & DRM_MODE_FLAG_NVSYNC) cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW; - if (info->bus_flags & DRM_BUS_FLAG_DE_LOW) + if (bridge->bus_cfg.flags & DRM_BUS_FLAG_DE_LOW) cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW; - if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) + if (bridge->bus_cfg.flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE; if (!priv->panel_is_sharp) { - if (conn->connector_type == DRM_MODE_CONNECTOR_TV) { + if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) cfg |= JZ_LCD_CFG_MODE_TV_OUT_I; else cfg |= JZ_LCD_CFG_MODE_TV_OUT_P; } else { - switch (*info->bus_formats) { + switch (bridge->bus_cfg.format) { case MEDIA_BUS_FMT_RGB565_1X16: cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT; break; @@ -664,20 +736,29 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg); } -static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) +static int ingenic_drm_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder); + + return drm_bridge_attach(bridge->encoder, ib->next_bridge, + &ib->bridge, flags); +} + +static int ingenic_drm_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { - struct drm_display_info *info = &conn_state->connector->display_info; struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder); - if (info->num_bus_formats != 1) - return -EINVAL; + ib->bus_cfg = bridge_state->output_bus_cfg; if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) return 0; - switch (*info->bus_formats) { + switch (bridge_state->output_bus_cfg.format) { case MEDIA_BUS_FMT_RGB888_3X8: case MEDIA_BUS_FMT_RGB888_3X8_DELTA: /* @@ -764,6 
+845,28 @@ ingenic_drm_gem_create_object(struct drm_device *drm, size_t size) return &obj->base; } +static struct drm_private_state * +ingenic_drm_duplicate_state(struct drm_private_obj *obj) +{ + struct ingenic_drm_private_state *state = to_ingenic_drm_priv_state(obj->state); + + state = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void ingenic_drm_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct ingenic_drm_private_state *priv_state = to_ingenic_drm_priv_state(state); + + kfree(priv_state); +} + DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops); static const struct drm_driver ingenic_drm_driver_data = { @@ -819,8 +922,16 @@ static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = { }; static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs = { - .atomic_mode_set = ingenic_drm_encoder_atomic_mode_set, - .atomic_check = ingenic_drm_encoder_atomic_check, + .atomic_mode_set = ingenic_drm_encoder_atomic_mode_set, +}; + +static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = { + .attach = ingenic_drm_bridge_attach, + .atomic_check = ingenic_drm_bridge_atomic_check, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, }; static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = { @@ -834,6 +945,11 @@ static struct drm_mode_config_helper_funcs ingenic_drm_mode_config_helpers = { .atomic_commit_tail = drm_atomic_helper_commit_tail, }; +static const struct drm_private_state_funcs ingenic_drm_private_state_funcs = { + .atomic_duplicate_state = ingenic_drm_duplicate_state, + .atomic_destroy_state = ingenic_drm_destroy_state, +}; + static void ingenic_drm_unbind_all(void *d) { struct ingenic_drm *priv = d; @@ -846,21 +962,57 @@ static void __maybe_unused ingenic_drm_release_rmem(void *d) of_reserved_mem_device_release(d); } +static void ingenic_drm_configure_hwdesc(struct ingenic_drm *priv, + unsigned int hwdesc, + unsigned int next_hwdesc, u32 id) +{ + struct ingenic_dma_hwdesc *desc = &priv->dma_hwdescs->hwdesc[hwdesc]; + + desc->next = dma_hwdesc_addr(priv, next_hwdesc); + desc->id = id; +} + +static void ingenic_drm_configure_hwdesc_palette(struct ingenic_drm *priv) +{ + struct ingenic_dma_hwdesc *desc; + + ingenic_drm_configure_hwdesc(priv, HWDESC_PALETTE, 0, 0xc0); + + desc = &priv->dma_hwdescs->hwdesc[HWDESC_PALETTE]; + desc->addr = priv->dma_hwdescs_phys + + offsetof(struct ingenic_dma_hwdescs, palette); + desc->cmd = JZ_LCD_CMD_ENABLE_PAL + | (sizeof(priv->dma_hwdescs->palette) / 4); +} + +static void ingenic_drm_configure_hwdesc_plane(struct ingenic_drm *priv, + unsigned int plane) +{ + ingenic_drm_configure_hwdesc(priv, plane, plane, 0xf0 | plane); +} + +static void ingenic_drm_atomic_private_obj_fini(struct drm_device *drm, void *private_obj) +{ + drm_atomic_private_obj_fini(private_obj); +} + static int ingenic_drm_bind(struct device *dev, bool has_components) { struct platform_device *pdev = to_platform_device(dev); + struct ingenic_drm_private_state *private_state; const struct jz_soc_info *soc_info; struct ingenic_drm *priv; struct clk *parent_clk; struct drm_plane *primary; struct drm_bridge *bridge; struct drm_panel *panel; + struct drm_connector 
*connector; struct drm_encoder *encoder; + struct ingenic_drm_bridge *ib; struct drm_device *drm; void __iomem *base; long parent_rate; unsigned int i, clone_mask = 0; - dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1; int ret, irq; soc_info = of_device_get_match_data(dev); @@ -942,27 +1094,14 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) if (!priv->dma_hwdescs) return -ENOMEM; - /* Configure DMA hwdesc for foreground0 plane */ - dma_hwdesc_phys_f0 = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); - priv->dma_hwdescs->hwdesc_f0.next = dma_hwdesc_phys_f0; - priv->dma_hwdescs->hwdesc_f0.id = 0xf0; + ingenic_drm_configure_hwdesc_plane(priv, 0); /* Configure DMA hwdesc for foreground1 plane */ - dma_hwdesc_phys_f1 = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, hwdesc_f1); - priv->dma_hwdescs->hwdesc_f1.next = dma_hwdesc_phys_f1; - priv->dma_hwdescs->hwdesc_f1.id = 0xf1; + ingenic_drm_configure_hwdesc_plane(priv, 1); /* Configure DMA hwdesc for palette */ - priv->dma_hwdescs->hwdesc_pal.next = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0); - priv->dma_hwdescs->hwdesc_pal.id = 0xc0; - priv->dma_hwdescs->hwdesc_pal.addr = priv->dma_hwdescs_phys - + offsetof(struct ingenic_dma_hwdescs, palette); - priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL - | (sizeof(priv->dma_hwdescs->palette) / 4); + ingenic_drm_configure_hwdesc_palette(priv); primary = priv->soc_info->has_osd ? &priv->f1 : &priv->f0; @@ -1046,20 +1185,36 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) bridge = devm_drm_panel_bridge_add_typed(dev, panel, DRM_MODE_CONNECTOR_DPI); - encoder = drmm_plain_encoder_alloc(drm, NULL, DRM_MODE_ENCODER_DPI, NULL); - if (IS_ERR(encoder)) { - ret = PTR_ERR(encoder); + ib = drmm_encoder_alloc(drm, struct ingenic_drm_bridge, encoder, + NULL, DRM_MODE_ENCODER_DPI, NULL); + if (IS_ERR(ib)) { + ret = PTR_ERR(ib); dev_err(dev, "Failed to init encoder: %d\n", ret); return ret; } - encoder->possible_crtcs = 1; + encoder = &ib->encoder; + encoder->possible_crtcs = drm_crtc_mask(&priv->crtc); drm_encoder_helper_add(encoder, &ingenic_drm_encoder_helper_funcs); - ret = drm_bridge_attach(encoder, bridge, NULL, 0); - if (ret) + ib->bridge.funcs = &ingenic_drm_bridge_funcs; + ib->next_bridge = bridge; + + ret = drm_bridge_attach(encoder, &ib->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(dev, "Unable to attach bridge\n"); return ret; + } + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) { + dev_err(dev, "Unable to init connector\n"); + return PTR_ERR(connector); + } + + drm_connector_attach_encoder(connector, encoder); } drm_for_each_encoder(encoder, drm) { @@ -1112,10 +1267,6 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) } } - /* Set address of our DMA descriptor chain */ - regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_phys_f0); - regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_phys_f1); - /* Enable OSD if available */ if (soc_info->has_osd) regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN); @@ -1130,6 +1281,20 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) goto err_devclk_disable; } + private_state = kzalloc(sizeof(*private_state), GFP_KERNEL); + if (!private_state) { + ret = -ENOMEM; + goto err_clk_notifier_unregister; + } + + drm_atomic_private_obj_init(drm, &priv->private_obj, &private_state->base, + &ingenic_drm_private_state_funcs); + + ret = 
drmm_add_action_or_reset(drm, ingenic_drm_atomic_private_obj_fini, + &priv->private_obj); + if (ret) + goto err_private_state_free; + ret = drm_dev_register(drm, 0); if (ret) { dev_err(dev, "Failed to register DRM driver\n"); @@ -1140,6 +1305,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components) return 0; +err_private_state_free: + kfree(private_state); err_clk_notifier_unregister: clk_notifier_unregister(parent_clk, &priv->clock_nb); err_devclk_disable: diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c index aeb8a757d213..2737fc521e15 100644 --- a/drivers/gpu/drm/ingenic/ingenic-ipu.c +++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c @@ -45,6 +45,12 @@ struct soc_info { unsigned int weight, unsigned int offset); }; +struct ingenic_ipu_private_state { + struct drm_private_state base; + + unsigned int num_w, num_h, denom_w, denom_h; +}; + struct ingenic_ipu { struct drm_plane plane; struct drm_device *drm; @@ -54,12 +60,12 @@ struct ingenic_ipu { const struct soc_info *soc_info; bool clk_enabled; - unsigned int num_w, num_h, denom_w, denom_h; - dma_addr_t addr_y, addr_u, addr_v; struct drm_property *sharpness_prop; unsigned int sharpness; + + struct drm_private_obj private_obj; }; /* Signed 15.16 fixed-point math (for bicubic scaling coefficients) */ @@ -73,6 +79,36 @@ static inline struct ingenic_ipu *plane_to_ingenic_ipu(struct drm_plane *plane) return container_of(plane, struct ingenic_ipu, plane); } +static inline struct ingenic_ipu_private_state * +to_ingenic_ipu_priv_state(struct drm_private_state *state) +{ + return container_of(state, struct ingenic_ipu_private_state, base); +} + +static struct ingenic_ipu_private_state * +ingenic_ipu_get_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_private_obj_state(state, &priv->private_obj); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_ingenic_ipu_priv_state(priv_state); +} + +static struct ingenic_ipu_private_state * +ingenic_ipu_get_new_priv_state(struct ingenic_ipu *priv, struct drm_atomic_state *state) +{ + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_new_private_obj_state(state, &priv->private_obj); + if (!priv_state) + return NULL; + + return to_ingenic_ipu_priv_state(priv_state); +} + /* * Apply conventional cubic convolution kernel. Both parameters * and return value are 15.16 signed fixed-point. 
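The IPU scaling code above and below works in signed 15.16 fixed point: the upper 16 bits hold the signed integer part and the lower 16 bits the fraction. A minimal sketch of that arithmetic, with illustrative helper names that do not appear in the driver:

#include <stdint.h>

#define FP_SHIFT 16			/* illustrative, not a driver macro */
#define FP_ONE	(1 << FP_SHIFT)		/* 1.0 in signed 15.16 */

/* Multiply two 15.16 values: widen to 64 bits so the intermediate
 * product cannot overflow, then shift back down to 15.16. */
static inline int32_t fp_mul(int32_t a, int32_t b)
{
	return (int32_t)(((int64_t)a * b) >> FP_SHIFT);
}

/* Example: fp_mul(3 * FP_ONE / 2, FP_ONE / 2) == 3 * FP_ONE / 4,
 * i.e. 1.5 * 0.5 == 0.75. */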
@@ -293,11 +329,16 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, const struct drm_format_info *finfo; u32 ctrl, stride = 0, coef_index = 0, format = 0; bool needs_modeset, upscaling_w, upscaling_h; + struct ingenic_ipu_private_state *ipu_state; int err; if (!newstate || !newstate->fb) return; + ipu_state = ingenic_ipu_get_new_priv_state(ipu, state); + if (WARN_ON(!ipu_state)) + return; + finfo = drm_format_info(newstate->fb->format->format); if (!ipu->clk_enabled) { @@ -470,27 +511,27 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, if (ipu->soc_info->has_bicubic) ctrl |= JZ_IPU_CTRL_ZOOM_SEL; - upscaling_w = ipu->num_w > ipu->denom_w; + upscaling_w = ipu_state->num_w > ipu_state->denom_w; if (upscaling_w) ctrl |= JZ_IPU_CTRL_HSCALE; - if (ipu->num_w != 1 || ipu->denom_w != 1) { + if (ipu_state->num_w != 1 || ipu_state->denom_w != 1) { if (!ipu->soc_info->has_bicubic && !upscaling_w) - coef_index |= (ipu->denom_w - 1) << 16; + coef_index |= (ipu_state->denom_w - 1) << 16; else - coef_index |= (ipu->num_w - 1) << 16; + coef_index |= (ipu_state->num_w - 1) << 16; ctrl |= JZ_IPU_CTRL_HRSZ_EN; } - upscaling_h = ipu->num_h > ipu->denom_h; + upscaling_h = ipu_state->num_h > ipu_state->denom_h; if (upscaling_h) ctrl |= JZ_IPU_CTRL_VSCALE; - if (ipu->num_h != 1 || ipu->denom_h != 1) { + if (ipu_state->num_h != 1 || ipu_state->denom_h != 1) { if (!ipu->soc_info->has_bicubic && !upscaling_h) - coef_index |= ipu->denom_h - 1; + coef_index |= ipu_state->denom_h - 1; else - coef_index |= ipu->num_h - 1; + coef_index |= ipu_state->num_h - 1; ctrl |= JZ_IPU_CTRL_VRSZ_EN; } @@ -501,13 +542,13 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, /* Set the LUT index register */ regmap_write(ipu->map, JZ_REG_IPU_RSZ_COEF_INDEX, coef_index); - if (ipu->num_w != 1 || ipu->denom_w != 1) + if (ipu_state->num_w != 1 || ipu_state->denom_w != 1) ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_HRSZ_COEF_LUT, - ipu->num_w, ipu->denom_w); + ipu_state->num_w, ipu_state->denom_w); - if (ipu->num_h != 1 || ipu->denom_h != 1) + if (ipu_state->num_h != 1 || ipu_state->denom_h != 1) ingenic_ipu_set_coefs(ipu, JZ_REG_IPU_VRSZ_COEF_LUT, - ipu->num_h, ipu->denom_h); + ipu_state->num_h, ipu_state->denom_h); /* Clear STATUS register */ regmap_write(ipu->map, JZ_REG_IPU_STATUS, 0); @@ -519,7 +560,8 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane, dev_dbg(ipu->dev, "Scaling %ux%u to %ux%u (%u:%u horiz, %u:%u vert)\n", newstate->src_w >> 16, newstate->src_h >> 16, newstate->crtc_w, newstate->crtc_h, - ipu->num_w, ipu->denom_w, ipu->num_h, ipu->denom_h); + ipu_state->num_w, ipu_state->denom_w, + ipu_state->num_h, ipu_state->denom_h); } static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, @@ -533,6 +575,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane); struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; struct drm_crtc_state *crtc_state; + struct ingenic_ipu_private_state *ipu_state; if (!crtc) return 0; @@ -541,6 +584,10 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, if (WARN_ON(!crtc_state)) return -EINVAL; + ipu_state = ingenic_ipu_get_priv_state(ipu, state); + if (IS_ERR(ipu_state)) + return PTR_ERR(ipu_state); + /* Request a full modeset if we are enabling or disabling the IPU. 
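 * The logical XOR of the two CRTC pointers below is true exactly when
 * one of the old and new plane states has a CRTC and the other does
 * not, i.e. when the plane is being enabled or disabled.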
*/ if (!old_plane_state->crtc ^ !new_plane_state->crtc) crtc_state->mode_changed = true; @@ -593,10 +640,10 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane, if (num_h > max_h) return -EINVAL; - ipu->num_w = num_w; - ipu->num_h = num_h; - ipu->denom_w = denom_w; - ipu->denom_h = denom_h; + ipu_state->num_w = num_w; + ipu_state->num_h = num_h; + ipu_state->denom_w = denom_w; + ipu_state->denom_h = denom_h; out_check_damage: if (ingenic_drm_map_noncoherent(ipu->master)) @@ -679,6 +726,33 @@ static const struct drm_plane_funcs ingenic_ipu_plane_funcs = { .atomic_set_property = ingenic_ipu_plane_atomic_set_property, }; +static struct drm_private_state * +ingenic_ipu_duplicate_state(struct drm_private_obj *obj) +{ + struct ingenic_ipu_private_state *state = to_ingenic_ipu_priv_state(obj->state); + + state = kmemdup(state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void ingenic_ipu_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct ingenic_ipu_private_state *priv_state = to_ingenic_ipu_priv_state(state); + + kfree(priv_state); +} + +static const struct drm_private_state_funcs ingenic_ipu_private_state_funcs = { + .atomic_duplicate_state = ingenic_ipu_duplicate_state, + .atomic_destroy_state = ingenic_ipu_destroy_state, +}; + static irqreturn_t ingenic_ipu_irq_handler(int irq, void *arg) { struct ingenic_ipu *ipu = arg; @@ -717,6 +791,7 @@ static const struct regmap_config ingenic_ipu_regmap_config = { static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d) { struct platform_device *pdev = to_platform_device(dev); + struct ingenic_ipu_private_state *private_state; const struct soc_info *soc_info; struct drm_device *drm = d; struct drm_plane *plane; @@ -810,7 +885,20 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d) return err; } + private_state = kzalloc(sizeof(*private_state), GFP_KERNEL); + if (!private_state) { + err = -ENOMEM; + goto err_clk_unprepare; + } + + drm_atomic_private_obj_init(drm, &ipu->private_obj, &private_state->base, + &ingenic_ipu_private_state_funcs); + return 0; + +err_clk_unprepare: + clk_unprepare(ipu->clk); + return err; } static void ingenic_ipu_unbind(struct device *dev, @@ -818,6 +906,7 @@ static void ingenic_ipu_unbind(struct device *dev, { struct ingenic_ipu *ipu = dev_get_drvdata(dev); + drm_atomic_private_obj_fini(&ipu->private_obj); clk_unprepare(ipu->clk); } diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig index bc4cb5e1cd8a..5fdd43dad507 100644 --- a/drivers/gpu/drm/kmb/Kconfig +++ b/drivers/gpu/drm/kmb/Kconfig @@ -3,7 +3,6 @@ config DRM_KMB_DISPLAY depends on DRM depends on ARCH_KEEMBAY || COMPILE_TEST select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_MIPI_DSI help diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c index 44327bc629ca..06613ffeaaf8 100644 --- a/drivers/gpu/drm/kmb/kmb_crtc.c +++ b/drivers/gpu/drm/kmb/kmb_crtc.c @@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = { .disable_vblank = kmb_crtc_disable_vblank, }; -static void kmb_crtc_set_mode(struct drm_crtc *crtc) +static void kmb_crtc_set_mode(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) { struct drm_device *dev = crtc->dev; struct drm_display_mode *m = &crtc->state->adjusted_mode; @@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct 
drm_crtc *crtc) unsigned int val = 0; /* Initialize mipi */ - kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz); + kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state); drm_info(dev, "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n", m->crtc_vsync_start - m->crtc_vdisplay, @@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc, struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc); clk_prepare_enable(kmb->kmb_clk.clk_lcd); - kmb_crtc_set_mode(crtc); + kmb_crtc_set_mode(crtc, state); drm_crtc_vblank_on(crtc); } @@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc, spin_unlock_irq(&crtc->dev->event_lock); } +static enum drm_mode_status + kmb_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + int refresh; + struct drm_device *dev = crtc->dev; + int vfp = mode->vsync_start - mode->vdisplay; + + if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) { + drm_dbg(dev, "height = %d less than %d", + mode->vdisplay, KMB_CRTC_MAX_HEIGHT); + return MODE_BAD_VVALUE; + } + if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) { + drm_dbg(dev, "width = %d less than %d", + mode->hdisplay, KMB_CRTC_MAX_WIDTH); + return MODE_BAD_HVALUE; + } + refresh = drm_mode_vrefresh(mode); + if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) { + drm_dbg(dev, "refresh = %d less than %d or greater than %d", + refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH); + return MODE_BAD; + } + + if (vfp < KMB_CRTC_MIN_VFP) { + drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP); + return MODE_BAD; + } + + return MODE_OK; +} + static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = { .atomic_begin = kmb_crtc_atomic_begin, .atomic_enable = kmb_crtc_atomic_enable, .atomic_disable = kmb_crtc_atomic_disable, .atomic_flush = kmb_crtc_atomic_flush, + .mode_valid = kmb_crtc_mode_valid, }; int kmb_setup_crtc(struct drm_device *drm) diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 1c2f4799f421..ed2424350773 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> @@ -172,10 +173,11 @@ static int kmb_setup_mode_config(struct drm_device *drm) ret = drmm_mode_config_init(drm); if (ret) return ret; - drm->mode_config.min_width = KMB_MIN_WIDTH; - drm->mode_config.min_height = KMB_MIN_HEIGHT; - drm->mode_config.max_width = KMB_MAX_WIDTH; - drm->mode_config.max_height = KMB_MAX_HEIGHT; + drm->mode_config.min_width = KMB_FB_MIN_WIDTH; + drm->mode_config.min_height = KMB_FB_MIN_HEIGHT; + drm->mode_config.max_width = KMB_FB_MAX_WIDTH; + drm->mode_config.max_height = KMB_FB_MAX_HEIGHT; + drm->mode_config.preferred_depth = 24; drm->mode_config.funcs = &kmb_mode_config_funcs; ret = kmb_setup_crtc(drm); @@ -380,7 +382,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev) if (val & LAYER3_DMA_FIFO_UNDERFLOW) drm_dbg(&kmb->drm, "LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val); - if (val & LAYER3_DMA_FIFO_UNDERFLOW) + if (val & LAYER3_DMA_FIFO_OVERFLOW) drm_dbg(&kmb->drm, "LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val); } @@ -559,6 +561,8 @@ static int kmb_probe(struct platform_device *pdev) if (ret) goto err_register; + drm_fbdev_generic_setup(&kmb->drm, 0); + return 0; err_register: diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h index 
ebbaa5f422d5..bf085e95b28f 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.h +++ b/drivers/gpu/drm/kmb/kmb_drv.h @@ -20,6 +20,18 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 1 +/* Platform definitions */ +#define KMB_CRTC_MIN_VFP 4 +#define KMB_CRTC_MAX_WIDTH 1920 /* max width in pixels */ +#define KMB_CRTC_MAX_HEIGHT 1080 /* max height in pixels */ +#define KMB_CRTC_MIN_WIDTH 1920 +#define KMB_CRTC_MIN_HEIGHT 1080 +#define KMB_FB_MAX_WIDTH 1920 +#define KMB_FB_MAX_HEIGHT 1080 +#define KMB_FB_MIN_WIDTH 1 +#define KMB_FB_MIN_HEIGHT 1 +#define KMB_MIN_VREFRESH 59 /*vertical refresh in Hz */ +#define KMB_MAX_VREFRESH 60 /*vertical refresh in Hz */ #define KMB_LCD_DEFAULT_CLK 200000000 #define KMB_SYS_CLK_MHZ 500 @@ -45,6 +57,7 @@ struct kmb_drm_private { spinlock_t irq_lock; int irq_lcd; int sys_clk_mhz; + struct disp_cfg init_disp_cfg[KMB_MAX_PLANES]; struct layer_status plane_status[KMB_MAX_PLANES]; int kmb_under_flow; int kmb_flush_done; diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c index 1793cd31b117..f6071882054c 100644 --- a/drivers/gpu/drm/kmb/kmb_dsi.c +++ b/drivers/gpu/drm/kmb/kmb_dsi.c @@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi, return 0; } +#define CLK_DIFF_LOW 50 +#define CLK_DIFF_HI 60 +#define SYSCLK_500 500 + static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen, struct mipi_tx_frame_timing_cfg *fg_cfg) { @@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen, /* 500 Mhz system clock minus 50 to account for the difference in * MIPI clock speed in RTL tests */ - sysclk = kmb_dsi->sys_clk_mhz - 50; + if (kmb_dsi->sys_clk_mhz == SYSCLK_500) { + sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW; + } else { + /* 700 Mhz clk*/ + sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI; + } /* PPL-Pixel Packing Layer, LLP-Low Level Protocol * Frame genartor timing parameters are clocked on the system clock, @@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi, return 0; } -static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) +static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi, + struct drm_atomic_state *old_state) { struct regmap *msscam; @@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) dev_dbg(kmb_dsi->dev, "failed to get msscam syscon"); return; } - + drm_atomic_bridge_chain_enable(adv_bridge, old_state); /* DISABLE MIPI->CIF CONNECTION */ regmap_write(msscam, MSS_MIPI_CIF_CFG, 0); @@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi) } int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, - int sys_clk_mhz) + int sys_clk_mhz, struct drm_atomic_state *old_state) { u64 data_rate; @@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, mipi_tx_init_cfg.lane_rate_mbps = data_rate; } - kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0); - kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0); - kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0); - kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0); - /* Initialize mipi controller */ mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg); /* Dphy initialization */ mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg); - connect_lcd_to_mipi(kmb_dsi); + connect_lcd_to_mipi(kmb_dsi, old_state); dev_info(kmb_dsi->dev, "mipi hw initialized"); return 0; diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h index 66b7c500d9bc..09dc88743d77 100644 --- a/drivers/gpu/drm/kmb/kmb_dsi.h +++ b/drivers/gpu/drm/kmb/kmb_dsi.h @@ -380,7 
+380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev); struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev); void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi); int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode, - int sys_clk_mhz); + int sys_clk_mhz, struct drm_atomic_state *old_state); int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi); int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi); int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi); diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index ecee6782612d..00404ba4126d 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = { static unsigned int check_pixel_format(struct drm_plane *plane, u32 format) { + struct kmb_drm_private *kmb; + struct kmb_plane *kmb_plane = to_kmb_plane(plane); int i; + int plane_id = kmb_plane->id; + struct disp_cfg init_disp_cfg; + kmb = to_kmb(plane->dev); + init_disp_cfg = kmb->init_disp_cfg[plane_id]; + /* Due to HW limitations, changing pixel format after initial + * plane configuration is not supported. + */ + if (init_disp_cfg.format && init_disp_cfg.format != format) { + drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration"); + return -EINVAL; + } for (i = 0; i < plane->format_count; i++) { if (plane->format_types[i] == format) return 0; @@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane, { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct kmb_drm_private *kmb; + struct kmb_plane *kmb_plane = to_kmb_plane(plane); + int plane_id = kmb_plane->id; + struct disp_cfg init_disp_cfg; struct drm_framebuffer *fb; int ret; struct drm_crtc_state *crtc_state; bool can_position; + kmb = to_kmb(plane->dev); + init_disp_cfg = kmb->init_disp_cfg[plane_id]; fb = new_plane_state->fb; if (!fb || !new_plane_state->crtc) return 0; @@ -94,10 +113,21 @@ static int kmb_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT) + if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH || + new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT || + new_plane_state->crtc_w < KMB_FB_MIN_WIDTH || + new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT) return -EINVAL; - if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT) + + /* Due to HW limitations, changing plane height or width after + * initial plane configuration is not supported. 
+ */ + if ((init_disp_cfg.width && init_disp_cfg.height) && + (init_disp_cfg.width != fb->width || + init_disp_cfg.height != fb->height)) { + drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration"); return -EINVAL; + } can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); crtc_state = drm_atomic_get_existing_crtc_state(state, @@ -277,6 +307,44 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id) kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]); } +static void kmb_plane_set_alpha(struct kmb_drm_private *kmb, + const struct drm_plane_state *state, + unsigned char plane_id, + unsigned int *val) +{ + u16 plane_alpha = state->alpha; + u16 pixel_blend_mode = state->pixel_blend_mode; + int has_alpha = state->fb->format->has_alpha; + + if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE) + *val |= LCD_LAYER_ALPHA_STATIC; + + if (has_alpha) { + switch (pixel_blend_mode) { + case DRM_MODE_BLEND_PIXEL_NONE: + break; + case DRM_MODE_BLEND_PREMULTI: + *val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT; + break; + case DRM_MODE_BLEND_COVERAGE: + *val |= LCD_LAYER_ALPHA_EMBED; + break; + default: + DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n", + __stringify(pixel_blend_mode), + (long)pixel_blend_mode); + break; + } + } + + if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) { + *val &= LCD_LAYER_ALPHA_DISABLED; + return; + } + + kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha); +} + static void kmb_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -296,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, unsigned char plane_id; int num_planes; static dma_addr_t addr[MAX_SUB_PLANES]; + struct disp_cfg *init_disp_cfg; if (!plane || !new_plane_state || !old_plane_state) return; @@ -303,11 +372,12 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, fb = new_plane_state->fb; if (!fb) return; + num_planes = fb->format->num_planes; kmb_plane = to_kmb_plane(plane); - plane_id = kmb_plane->id; kmb = to_kmb(plane->dev); + plane_id = kmb_plane->id; spin_lock_irq(&kmb->irq_lock); if (kmb->kmb_under_flow || kmb->kmb_flush_done) { @@ -317,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, } spin_unlock_irq(&kmb->irq_lock); - src_w = (new_plane_state->src_w >> 16); + init_disp_cfg = &kmb->init_disp_cfg[plane_id]; + src_w = new_plane_state->src_w >> 16; src_h = new_plane_state->src_h >> 16; crtc_x = new_plane_state->crtc_x; crtc_y = new_plane_state->crtc_y; @@ -400,20 +471,32 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, config_csc(kmb, plane_id); } + kmb_plane_set_alpha(kmb, plane->state, plane_id, &val); + kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val); + /* Configure LCD_CONTROL */ + ctrl = kmb_read_lcd(kmb, LCD_CONTROL); + + /* Set layer blending config */ + ctrl &= ~LCD_CTRL_ALPHA_ALL; + ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 | + LCD_CTRL_ALPHA_BLEND_VL2; + + ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE; + switch (plane_id) { case LAYER_0: - ctrl = LCD_CTRL_VL1_ENABLE; + ctrl |= LCD_CTRL_VL1_ENABLE; break; case LAYER_1: - ctrl = LCD_CTRL_VL2_ENABLE; + ctrl |= LCD_CTRL_VL2_ENABLE; break; case LAYER_2: - ctrl = LCD_CTRL_GL1_ENABLE; + ctrl |= LCD_CTRL_GL1_ENABLE; break; case LAYER_3: - ctrl = LCD_CTRL_GL2_ENABLE; + ctrl |= LCD_CTRL_GL2_ENABLE; break; } @@ -425,7 +508,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, */ ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL; - kmb_set_bitmask_lcd(kmb, LCD_CONTROL, ctrl); + 
kmb_write_lcd(kmb, LCD_CONTROL, ctrl); /* Enable pipeline AXI read transactions for the DMA * after setting graphics layers. This must be done @@ -448,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane, /* Enable DMA */ kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg); + + /* Save initial display config */ + if (!init_disp_cfg->width || + !init_disp_cfg->height || + !init_disp_cfg->format) { + init_disp_cfg->width = width; + init_disp_cfg->height = height; + init_disp_cfg->format = fb->format->format; + } + drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg, kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id))); @@ -490,6 +583,9 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm) enum drm_plane_type plane_type; const u32 *plane_formats; int num_plane_formats; + unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE); for (i = 0; i < KMB_MAX_PLANES; i++) { plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL); @@ -521,8 +617,16 @@ struct kmb_plane *kmb_plane_init(struct drm_device *drm) drm_dbg(drm, "%s : %d i=%d type=%d", __func__, __LINE__, i, plane_type); + drm_plane_create_alpha_property(&plane->base_plane); + + drm_plane_create_blend_mode_property(&plane->base_plane, + blend_caps); + + drm_plane_create_zpos_immutable_property(&plane->base_plane, i); + drm_plane_helper_add(&plane->base_plane, &kmb_plane_helper_funcs); + if (plane_type == DRM_PLANE_TYPE_PRIMARY) { primary = plane; kmb->plane = plane; diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h index 486490f7a3ec..b51144044fe8 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.h +++ b/drivers/gpu/drm/kmb/kmb_plane.h @@ -35,6 +35,9 @@ #define POSSIBLE_CRTCS 1 #define to_kmb_plane(x) container_of(x, struct kmb_plane, base_plane) +#define POSSIBLE_CRTCS 1 +#define KMB_MAX_PLANES 2 + enum layer_id { LAYER_0, LAYER_1, @@ -43,8 +46,6 @@ enum layer_id { /* KMB_MAX_PLANES */ }; -#define KMB_MAX_PLANES 1 - enum sub_plane_id { Y_PLANE, U_PLANE, @@ -62,6 +63,12 @@ struct layer_status { u32 ctrl; }; +struct disp_cfg { + unsigned int width; + unsigned int height; + unsigned int format; +}; + struct kmb_plane *kmb_plane_init(struct drm_device *drm); void kmb_plane_destroy(struct drm_plane *plane); #endif /* __KMB_PLANE_H__ */ diff --git a/drivers/gpu/drm/kmb/kmb_regs.h b/drivers/gpu/drm/kmb/kmb_regs.h index 48150569f702..9756101b0d32 100644 --- a/drivers/gpu/drm/kmb/kmb_regs.h +++ b/drivers/gpu/drm/kmb/kmb_regs.h @@ -43,8 +43,10 @@ #define LCD_CTRL_OUTPUT_ENABLED BIT(19) #define LCD_CTRL_BPORCH_ENABLE BIT(21) #define LCD_CTRL_FPORCH_ENABLE BIT(22) +#define LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE BIT(23) #define LCD_CTRL_PIPELINE_DMA BIT(28) #define LCD_CTRL_VHSYNC_IDLE_LVL BIT(31) +#define LCD_CTRL_ALPHA_ALL (0xff << 6) /* interrupts */ #define LCD_INT_STATUS (0x4 * 0x001) @@ -115,6 +117,7 @@ #define LCD_LAYER_ALPHA_EMBED BIT(5) #define LCD_LAYER_ALPHA_COMBI (LCD_LAYER_ALPHA_STATIC | \ LCD_LAYER_ALPHA_EMBED) +#define LCD_LAYER_ALPHA_DISABLED ~(LCD_LAYER_ALPHA_COMBI) /* RGB multiplied with alpha */ #define LCD_LAYER_ALPHA_PREMULT BIT(6) #define LCD_LAYER_INVERT_COL BIT(7) diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 65fdca366e41..02cef0cea657 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -4,6 +4,7 @@ #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/clk.h> +#include <linux/slab.h> #include 
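The kmb_plane_init() hunk above hooks the new kmb_plane_set_alpha() path up to the generic KMS blending properties; all three helpers it calls are DRM core APIs. Condensed, the property setup looks like this (a restatement of the hunk, not new code):

```c
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |	/* ignore per-pixel alpha */
			  BIT(DRM_MODE_BLEND_PREMULTI) |	/* fb alpha is premultiplied */
			  BIT(DRM_MODE_BLEND_COVERAGE);		/* fb alpha is straight */

drm_plane_create_alpha_property(&plane->base_plane);		/* "alpha", 0..0xffff */
drm_plane_create_blend_mode_property(&plane->base_plane, blend_caps);
drm_plane_create_zpos_immutable_property(&plane->base_plane, i);	/* fixed z-order */
```

At commit time kmb_plane_set_alpha() then maps state->alpha and state->pixel_blend_mode onto the LCD_LAYER_ALPHA_STATIC/EMBED/PREMULT bits, with LCD_LAYER_ALPHA_DISABLED masking everything off (and skipping the per-layer alpha write) for the fully opaque, no-alpha case.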
<linux/dma-mapping.h> #include <linux/platform_device.h> @@ -357,6 +358,7 @@ int lima_device_init(struct lima_device *ldev) int err, i; dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(ldev->dev, UINT_MAX); err = lima_clk_init(ldev); if (err) diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 640acc060467..f9a9198ef198 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -127,7 +127,7 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, if (err) goto out; } else { - struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj); + struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem); if (IS_ERR(sgt)) { err = PTR_ERR(sgt); @@ -151,7 +151,7 @@ static void lima_gem_free_object(struct drm_gem_object *obj) if (!list_empty(&bo->va)) dev_err(obj->dev->dev, "lima gem free bo still has va\n"); - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&bo->base); } static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) @@ -179,7 +179,7 @@ static int lima_gem_pin(struct drm_gem_object *obj) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_pin(obj); + return drm_gem_shmem_pin(&bo->base); } static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) @@ -189,7 +189,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_vmap(obj, map); + return drm_gem_shmem_vmap(&bo->base, map); } static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) @@ -199,19 +199,19 @@ static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_mmap(obj, vma); + return drm_gem_shmem_mmap(&bo->base, vma); } static const struct drm_gem_object_funcs lima_gem_funcs = { .free = lima_gem_free_object, .open = lima_gem_object_open, .close = lima_gem_object_close, - .print_info = drm_gem_shmem_print_info, + .print_info = drm_gem_shmem_object_print_info, .pin = lima_gem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, .vmap = lima_gem_vmap, - .vunmap = drm_gem_shmem_vunmap, + .vunmap = drm_gem_shmem_object_vunmap, .mmap = lima_gem_mmap, }; @@ -221,7 +221,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) - return NULL; + return ERR_PTR(-ENOMEM); mutex_init(&bo->lock); INIT_LIST_HEAD(&bo->va); diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 99d5f6f1a882..5612d73f238f 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) } else { buffer_chunk->size = lima_bo_size(bo); - ret = drm_gem_shmem_vmap(&bo->base.base, &map); + ret = drm_gem_shmem_vmap(&bo->base, &map); if (ret) { kvfree(et); goto out; @@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); - drm_gem_shmem_vunmap(&bo->base.base, &map); + drm_gem_shmem_vunmap(&bo->base, &map); } buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig index 71c689b573c9..d0bf1bc8da3f 100644 --- 
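The lima changes above are part of this series' GEM shmem helper rework: helpers that used to take the raw struct drm_gem_object * now take the struct drm_gem_shmem_object * itself, drm_gem_shmem_free_object() becomes drm_gem_shmem_free(), and vtable entries that need no driver logic switch to drm_gem_shmem_object_* wrappers that unwrap the GEM object internally. A sketch of the conversion recipe, with an illustrative my_ prefix:

```c
#include <drm/drm_gem_shmem_helper.h>

struct my_bo {
	struct drm_gem_shmem_object base;	/* shmem object embeds the GEM object */
};

static void my_gem_free(struct drm_gem_object *obj)
{
	struct my_bo *bo = container_of(obj, struct my_bo, base.base);

	drm_gem_shmem_free(&bo->base);		/* was drm_gem_shmem_free_object(obj) */
}

static int my_gem_pin(struct drm_gem_object *obj)
{
	struct my_bo *bo = container_of(obj, struct my_bo, base.base);

	return drm_gem_shmem_pin(&bo->base);	/* was drm_gem_shmem_pin(obj) */
}

/* Hooks with no driver-specific logic use the new object_* wrappers: */
static const struct drm_gem_object_funcs my_gem_funcs = {
	.free		= my_gem_free,
	.pin		= my_gem_pin,
	.unpin		= drm_gem_shmem_object_unpin,
	.print_info	= drm_gem_shmem_object_print_info,
	.get_sg_table	= drm_gem_shmem_object_get_sg_table,
	.vmap		= drm_gem_shmem_object_vmap,
	.vunmap		= drm_gem_shmem_object_vunmap,
	.mmap		= drm_gem_shmem_object_mmap,
};
```

Note also the small behavioural fix at the end of the lima hunk: gem_create_object implementations are now expected to return ERR_PTR(-ENOMEM) on allocation failure rather than NULL.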
a/drivers/gpu/drm/mcde/Kconfig +++ b/drivers/gpu/drm/mcde/Kconfig @@ -10,7 +10,6 @@ config DRM_MCDE select DRM_BRIDGE select DRM_PANEL_BRIDGE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE help diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c index 141cb36b9c07..3a53ebc4e172 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c @@ -205,9 +205,15 @@ static const struct mtk_disp_ccorr_data mt8183_ccorr_driver_data = { .matrix_bits = 10, }; +static const struct mtk_disp_ccorr_data mt8192_ccorr_driver_data = { + .matrix_bits = 11, +}; + static const struct of_device_id mtk_disp_ccorr_driver_dt_match[] = { { .compatible = "mediatek,mt8183-disp-ccorr", .data = &mt8183_ccorr_driver_data}, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = &mt8192_ccorr_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_ccorr_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 5326989d5206..2146299e5f52 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -456,6 +456,22 @@ static const struct mtk_disp_ovl_data mt8183_ovl_2l_driver_data = { .fmt_rgb565_is_0 = true, }; +static const struct mtk_disp_ovl_data mt8192_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 4, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, +}; + +static const struct mtk_disp_ovl_data mt8192_ovl_2l_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 10, + .layer_nr = 2, + .fmt_rgb565_is_0 = true, + .smi_id_en = true, +}; + static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-ovl", .data = &mt2701_ovl_driver_data}, @@ -465,6 +481,10 @@ static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { .data = &mt8183_ovl_driver_data}, { .compatible = "mediatek,mt8183-disp-ovl-2l", .data = &mt8183_ovl_2l_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = &mt8192_ovl_driver_data}, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = &mt8192_ovl_2l_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 75d7f45579e2..d41a3970b944 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -353,6 +353,10 @@ static const struct mtk_disp_rdma_data mt8183_rdma_driver_data = { .fifo_size = 5 * SZ_1K, }; +static const struct mtk_disp_rdma_data mt8192_rdma_driver_data = { + .fifo_size = 5 * SZ_1K, +}; + static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-rdma", .data = &mt2701_rdma_driver_data}, @@ -360,6 +364,8 @@ static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { .data = &mt8173_rdma_driver_data}, { .compatible = "mediatek,mt8183-disp-rdma", .data = &mt8183_rdma_driver_data}, + { .compatible = "mediatek,mt8192-disp-rdma", + .data = &mt8192_rdma_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 5f81489fc60c..d661edf7e0fe 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -52,8 +52,7 @@ struct mtk_drm_crtc { bool 
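The mt8192 enablement above follows the kernel's usual per-SoC pattern: one small const driver-data struct per compatible (matrix_bits = 11 for the wider ccorr matrix, smi_id_en for the new OVL instances, a 5 KiB RDMA FIFO), referenced from of_device_id.data. The probe side conventionally recovers it with of_device_get_match_data(); the helper is a real OF API, but treating the sketch below as these drivers' exact probe code is an assumption:

```c
static int my_disp_probe(struct platform_device *pdev)
{
	const struct mtk_disp_ovl_data *data;

	/* Returns the .data field of the matched of_device_id, or NULL. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	/* mt8192 then differs only by data->layer_nr, data->smi_id_en, ... */
	return 0;
}
```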
pending_async_planes; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - struct mbox_client cmdq_cl; - struct mbox_chan *cmdq_chan; + struct cmdq_client cmdq_client; struct cmdq_pkt cmdq_handle; u32 cmdq_event; u32 cmdq_vblank_cnt; @@ -109,12 +108,60 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) } } +#if IS_REACHABLE(CONFIG_MTK_CMDQ) +static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, + size_t size) +{ + struct device *dev; + dma_addr_t dma_addr; + + pkt->va_base = kzalloc(size, GFP_KERNEL); + if (!pkt->va_base) { + kfree(pkt); + return -ENOMEM; + } + pkt->buf_size = size; + pkt->cl = (void *)client; + + dev = client->chan->mbox->dev; + dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); + kfree(pkt->va_base); + kfree(pkt); + return -ENOMEM; + } + + pkt->pa_base = dma_addr; + + return 0; +} + +static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt) +{ + struct cmdq_client *client = (struct cmdq_client *)pkt->cl; + + dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, + DMA_TO_DEVICE); + kfree(pkt->va_base); + kfree(pkt); +} +#endif + static void mtk_drm_crtc_destroy(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); mtk_mutex_put(mtk_crtc->mutex); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle); + if (mtk_crtc->cmdq_client.chan) { + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; + } +#endif drm_crtc_cleanup(crtc); } @@ -227,49 +274,17 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, } #if IS_REACHABLE(CONFIG_MTK_CMDQ) -static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt, - size_t size) -{ - struct device *dev; - dma_addr_t dma_addr; - - pkt->va_base = kzalloc(size, GFP_KERNEL); - if (!pkt->va_base) { - kfree(pkt); - return -ENOMEM; - } - pkt->buf_size = size; - - dev = chan->mbox->dev; - dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, - DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { - dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); - kfree(pkt->va_base); - kfree(pkt); - return -ENOMEM; - } - - pkt->pa_base = dma_addr; - - return 0; -} - -static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt) -{ - dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size, - DMA_TO_DEVICE); - kfree(pkt->va_base); - kfree(pkt); -} - static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) { - struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl); struct cmdq_cb_data *data = mssg; + struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client); + struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client); struct mtk_crtc_state *state; unsigned int i; + if (data->sta < 0) + return; + state = to_mtk_crtc_state(mtk_crtc->base.state); state->pending_config = false; @@ -299,7 +314,6 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg) } mtk_crtc->cmdq_vblank_cnt = 0; - mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt); } #endif @@ -550,24 +564,28 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc, mtk_mutex_release(mtk_crtc->mutex); } #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (mtk_crtc->cmdq_chan) { - mbox_flush(mtk_crtc->cmdq_chan, 2000); + if (mtk_crtc->cmdq_client.chan) { + 
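Two related changes run through the CRTC hunk above: the bare mbox_client/mbox_chan pair is replaced by an embedded struct cmdq_client (which itself embeds the struct mbox_client, per linux/soc/mediatek/mtk-cmdq.h), and the command packet keeps its CPU buffer streaming-mapped for DMA across the CRTC's lifetime. The embedding is what makes the new rx callback work, via two container_of() steps:

```c
#include <linux/mailbox_client.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

/* Sketch of the callback plumbing introduced above. */
static void ddp_cmdq_cb_sketch(struct mbox_client *cl, void *mssg)
{
	struct cmdq_client *cmdq_cl =
		container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc =
		container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
	struct cmdq_cb_data *data = mssg;

	if (data->sta < 0)	/* controller-reported error: leave state pending */
		return;

	/* ... apply the completed atomic state, as ddp_cmdq_cb() does ... */
}
```

The buffer itself follows the classic streaming-DMA lifecycle: kzalloc() plus dma_map_single(..., DMA_TO_DEVICE) at creation (checked with dma_mapping_error()), dma_sync_single_for_device() after each CPU rewrite, and dma_unmap_single() plus kfree() at teardown, which now happens once in mtk_drm_crtc_destroy() instead of after every completion.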
mbox_flush(mtk_crtc->cmdq_client.chan, 2000); cmdq_handle->cmd_buf_size = 0; cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event); cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false); mtk_crtc_ddp_config(crtc, cmdq_handle); cmdq_pkt_finalize(cmdq_handle); - dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev, - cmdq_handle->pa_base, - cmdq_handle->cmd_buf_size, - DMA_TO_DEVICE); + dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev, + cmdq_handle->pa_base, + cmdq_handle->cmd_buf_size, + DMA_TO_DEVICE); /* - * CMDQ command should execute in next vblank, - * If it fail to execute in next 2 vblank, timeout happen. + * CMDQ command should execute in next 3 vblank. + * One vblank interrupt before send message (occasionally) + * and one vblank interrupt after cmdq done, + * so it's timeout after 3 vblank interrupt. + * If it fail to execute in next 3 vblank, timeout happen. */ - mtk_crtc->cmdq_vblank_cnt = 2; - mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle); - mbox_client_txdone(mtk_crtc->cmdq_chan, 0); + mtk_crtc->cmdq_vblank_cnt = 3; + + mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle); + mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0); } #endif mtk_crtc->config_updating = false; @@ -581,7 +599,7 @@ static void mtk_crtc_ddp_irq(void *data) struct mtk_drm_private *priv = crtc->dev->dev_private; #if IS_REACHABLE(CONFIG_MTK_CMDQ) - if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan) + if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan) mtk_crtc_ddp_config(crtc, NULL); else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0) DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n", @@ -924,20 +942,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mutex_init(&mtk_crtc->hw_lock); #if IS_REACHABLE(CONFIG_MTK_CMDQ) - mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev; - mtk_crtc->cmdq_cl.tx_block = false; - mtk_crtc->cmdq_cl.knows_txdone = true; - mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb; - mtk_crtc->cmdq_chan = - mbox_request_channel(&mtk_crtc->cmdq_cl, - drm_crtc_index(&mtk_crtc->base)); - if (IS_ERR(mtk_crtc->cmdq_chan)) { + mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev; + mtk_crtc->cmdq_client.client.tx_block = false; + mtk_crtc->cmdq_client.client.knows_txdone = true; + mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb; + mtk_crtc->cmdq_client.chan = + mbox_request_channel(&mtk_crtc->cmdq_client.client, + drm_crtc_index(&mtk_crtc->base)); + if (IS_ERR(mtk_crtc->cmdq_client.chan)) { dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", drm_crtc_index(&mtk_crtc->base)); - mtk_crtc->cmdq_chan = NULL; + mtk_crtc->cmdq_client.chan = NULL; } - if (mtk_crtc->cmdq_chan) { + if (mtk_crtc->cmdq_client.chan) { ret = of_property_read_u32_index(priv->mutex_node, "mediatek,gce-events", drm_crtc_index(&mtk_crtc->base), @@ -945,17 +963,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, if (ret) { dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n", drm_crtc_index(&mtk_crtc->base)); - mbox_free_channel(mtk_crtc->cmdq_chan); - mtk_crtc->cmdq_chan = NULL; + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; } else { - ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan, - &mtk_crtc->cmdq_handle, - PAGE_SIZE); + ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client, + &mtk_crtc->cmdq_handle, + PAGE_SIZE); if (ret) { dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n", drm_crtc_index(&mtk_crtc->base)); - 
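The reworded comment above is terse; the budget it describes is roughly one vblank that may elapse before the mailbox message is consumed, one for the GCE to execute the packet, and one more before completion is observed, hence cmdq_vblank_cnt = 3, decremented each vblank in mtk_crtc_ddp_irq() and reset by the completion callback, with the timeout error firing only if it reaches zero first. The per-flush sequence, condensed from the hunk:

```c
/* Condensed submit path from mtk_drm_crtc_update_config(). */
mbox_flush(mtk_crtc->cmdq_client.chan, 2000);	/* flush anything in flight */
cmdq_handle->cmd_buf_size = 0;			/* rewind the reusable packet */
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);	/* wait-for-event */
mtk_crtc_ddp_config(crtc, cmdq_handle);		/* emit the register writes */
cmdq_pkt_finalize(cmdq_handle);
dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
			   cmdq_handle->pa_base,
			   cmdq_handle->cmd_buf_size, DMA_TO_DEVICE);
mtk_crtc->cmdq_vblank_cnt = 3;			/* timeout budget, see above */
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
```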
mbox_free_channel(mtk_crtc->cmdq_chan); - mtk_crtc->cmdq_chan = NULL; + mbox_free_channel(mtk_crtc->cmdq_client.chan); + mtk_crtc->cmdq_client.chan = NULL; } } } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 99cbf44463e4..b4b682bc1991 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -20,45 +20,39 @@ #include "mtk_drm_ddp_comp.h" #include "mtk_drm_crtc.h" -#define DISP_OD_EN 0x0000 -#define DISP_OD_INTEN 0x0008 -#define DISP_OD_INTSTA 0x000c -#define DISP_OD_CFG 0x0020 -#define DISP_OD_SIZE 0x0030 -#define DISP_DITHER_5 0x0114 -#define DISP_DITHER_7 0x011c -#define DISP_DITHER_15 0x013c -#define DISP_DITHER_16 0x0140 -#define DISP_REG_UFO_START 0x0000 - -#define DISP_DITHER_EN 0x0000 +#define DISP_REG_DITHER_EN 0x0000 #define DITHER_EN BIT(0) -#define DISP_DITHER_CFG 0x0020 +#define DISP_REG_DITHER_CFG 0x0020 #define DITHER_RELAY_MODE BIT(0) #define DITHER_ENGINE_EN BIT(1) -#define DISP_DITHER_SIZE 0x0030 - -#define LUT_10BIT_MASK 0x03ff - -#define OD_RELAYMODE BIT(0) - -#define UFO_BYPASS BIT(2) - #define DISP_DITHERING BIT(2) +#define DISP_REG_DITHER_SIZE 0x0030 +#define DISP_REG_DITHER_5 0x0114 +#define DISP_REG_DITHER_7 0x011c +#define DISP_REG_DITHER_15 0x013c #define DITHER_LSB_ERR_SHIFT_R(x) (((x) & 0x7) << 28) -#define DITHER_OVFLW_BIT_R(x) (((x) & 0x7) << 24) #define DITHER_ADD_LSHIFT_R(x) (((x) & 0x7) << 20) -#define DITHER_ADD_RSHIFT_R(x) (((x) & 0x7) << 16) #define DITHER_NEW_BIT_MODE BIT(0) +#define DISP_REG_DITHER_16 0x0140 #define DITHER_LSB_ERR_SHIFT_B(x) (((x) & 0x7) << 28) -#define DITHER_OVFLW_BIT_B(x) (((x) & 0x7) << 24) #define DITHER_ADD_LSHIFT_B(x) (((x) & 0x7) << 20) -#define DITHER_ADD_RSHIFT_B(x) (((x) & 0x7) << 16) #define DITHER_LSB_ERR_SHIFT_G(x) (((x) & 0x7) << 12) -#define DITHER_OVFLW_BIT_G(x) (((x) & 0x7) << 8) #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) -#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) + +#define DISP_REG_OD_EN 0x0000 +#define DISP_REG_OD_CFG 0x0020 +#define OD_RELAYMODE BIT(0) +#define DISP_REG_OD_SIZE 0x0030 + +#define DISP_REG_POSTMASK_EN 0x0000 +#define POSTMASK_EN BIT(0) +#define DISP_REG_POSTMASK_CFG 0x0020 +#define POSTMASK_RELAY_MODE BIT(0) +#define DISP_REG_POSTMASK_SIZE 0x0030 + +#define DISP_REG_UFO_START 0x0000 +#define UFO_BYPASS BIT(2) struct mtk_ddp_comp_dev { struct clk *clk; @@ -134,25 +128,52 @@ void mtk_dither_set_common(void __iomem *regs, struct cmdq_client_reg *cmdq_reg, return; if (bpc >= MTK_MIN_BPC) { - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_5); - mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_DITHER_7); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_5); + mtk_ddp_write(cmdq_pkt, 0, cmdq_reg, regs, DISP_REG_DITHER_7); mtk_ddp_write(cmdq_pkt, DITHER_LSB_ERR_SHIFT_R(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_R(MTK_MAX_BPC - bpc) | DITHER_NEW_BIT_MODE, - cmdq_reg, regs, DISP_DITHER_15); + cmdq_reg, regs, DISP_REG_DITHER_15); mtk_ddp_write(cmdq_pkt, DITHER_LSB_ERR_SHIFT_B(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_B(MTK_MAX_BPC - bpc) | DITHER_LSB_ERR_SHIFT_G(MTK_MAX_BPC - bpc) | DITHER_ADD_LSHIFT_G(MTK_MAX_BPC - bpc), - cmdq_reg, regs, DISP_DITHER_16); + cmdq_reg, regs, DISP_REG_DITHER_16); mtk_ddp_write(cmdq_pkt, dither_en, cmdq_reg, regs, cfg); } } +static void mtk_dither_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + 
mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_REG_DITHER_SIZE); + mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, + DISP_REG_DITHER_CFG); + mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_REG_DITHER_CFG, + DITHER_ENGINE_EN, cmdq_pkt); +} + +static void mtk_dither_start(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel(DITHER_EN, priv->regs + DISP_REG_DITHER_EN); +} + +static void mtk_dither_stop(struct device *dev) +{ + struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); + + writel_relaxed(0x0, priv->regs + DISP_REG_DITHER_EN); +} + static void mtk_dither_set(struct device *dev, unsigned int bpc, - unsigned int cfg, struct cmdq_pkt *cmdq_pkt) + unsigned int cfg, struct cmdq_pkt *cmdq_pkt) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); @@ -166,49 +187,49 @@ static void mtk_od_config(struct device *dev, unsigned int w, { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_OD_SIZE); - mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_OD_CFG); - mtk_dither_set(dev, bpc, DISP_OD_CFG, cmdq_pkt); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, DISP_REG_OD_SIZE); + mtk_ddp_write(cmdq_pkt, OD_RELAYMODE, &priv->cmdq_reg, priv->regs, DISP_REG_OD_CFG); + mtk_dither_set(dev, bpc, DISP_REG_OD_CFG, cmdq_pkt); } static void mtk_od_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(1, priv->regs + DISP_OD_EN); + writel(1, priv->regs + DISP_REG_OD_EN); } -static void mtk_ufoe_start(struct device *dev) +static void mtk_postmask_config(struct device *dev, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc, struct cmdq_pkt *cmdq_pkt) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); + mtk_ddp_write(cmdq_pkt, w << 16 | h, &priv->cmdq_reg, priv->regs, + DISP_REG_POSTMASK_SIZE); + mtk_ddp_write(cmdq_pkt, POSTMASK_RELAY_MODE, &priv->cmdq_reg, + priv->regs, DISP_REG_POSTMASK_CFG); } -static void mtk_dither_config(struct device *dev, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc, struct cmdq_pkt *cmdq_pkt) +static void mtk_postmask_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - mtk_ddp_write(cmdq_pkt, h << 16 | w, &priv->cmdq_reg, priv->regs, DISP_DITHER_SIZE); - mtk_ddp_write(cmdq_pkt, DITHER_RELAY_MODE, &priv->cmdq_reg, priv->regs, DISP_DITHER_CFG); - mtk_dither_set_common(priv->regs, &priv->cmdq_reg, bpc, DISP_DITHER_CFG, - DITHER_ENGINE_EN, cmdq_pkt); + writel(POSTMASK_EN, priv->regs + DISP_REG_POSTMASK_EN); } -static void mtk_dither_start(struct device *dev) +static void mtk_postmask_stop(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel(DITHER_EN, priv->regs + DISP_DITHER_EN); + writel_relaxed(0x0, priv->regs + DISP_REG_POSTMASK_EN); } -static void mtk_dither_stop(struct device *dev) +static void mtk_ufoe_start(struct device *dev) { struct mtk_ddp_comp_dev *priv = dev_get_drvdata(dev); - writel_relaxed(0x0, priv->regs + DISP_DITHER_EN); + writel(UFO_BYPASS, priv->regs + DISP_REG_UFO_START); } static const struct mtk_ddp_comp_funcs ddp_aal = { @@ -286,6 +307,14 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .bgclr_in_off = mtk_ovl_bgclr_in_off, }; +static const struct mtk_ddp_comp_funcs ddp_postmask = { + .clk_enable = mtk_ddp_clk_enable, + .clk_disable = 
mtk_ddp_clk_disable, + .config = mtk_postmask_config, + .start = mtk_postmask_start, + .stop = mtk_postmask_stop, +}; + static const struct mtk_ddp_comp_funcs ddp_rdma = { .clk_enable = mtk_rdma_clk_enable, .clk_disable = mtk_rdma_clk_disable, @@ -305,22 +334,23 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { }; static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { + [MTK_DISP_AAL] = "aal", + [MTK_DISP_BLS] = "bls", + [MTK_DISP_CCORR] = "ccorr", + [MTK_DISP_COLOR] = "color", + [MTK_DISP_DITHER] = "dither", + [MTK_DISP_GAMMA] = "gamma", + [MTK_DISP_MUTEX] = "mutex", + [MTK_DISP_OD] = "od", [MTK_DISP_OVL] = "ovl", [MTK_DISP_OVL_2L] = "ovl-2l", + [MTK_DISP_POSTMASK] = "postmask", + [MTK_DISP_PWM] = "pwm", [MTK_DISP_RDMA] = "rdma", - [MTK_DISP_WDMA] = "wdma", - [MTK_DISP_COLOR] = "color", - [MTK_DISP_CCORR] = "ccorr", - [MTK_DISP_AAL] = "aal", - [MTK_DISP_GAMMA] = "gamma", - [MTK_DISP_DITHER] = "dither", [MTK_DISP_UFOE] = "ufoe", - [MTK_DSI] = "dsi", + [MTK_DISP_WDMA] = "wdma", [MTK_DPI] = "dpi", - [MTK_DISP_PWM] = "pwm", - [MTK_DISP_MUTEX] = "mutex", - [MTK_DISP_OD] = "od", - [MTK_DISP_BLS] = "bls", + [MTK_DSI] = "dsi", }; struct mtk_ddp_comp_match { @@ -330,35 +360,38 @@ struct mtk_ddp_comp_match { }; static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { - [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, - [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, - [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, - [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, - [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, - [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, - [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, - [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, - [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, - [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, - [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, &ddp_dsi }, - [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, - [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, - [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, - [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, - [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, - [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, - [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, - [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, - [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, - [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, - [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, - [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, - [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, - [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, - [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, - [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, + [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, + [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, + [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, + [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, + [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, + [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, + [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, &ddp_dpi }, + [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, &ddp_dpi }, + [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, &ddp_dsi }, + [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, 
&ddp_dsi }, + [DDP_COMPONENT_DSI2] = { MTK_DSI, 2, &ddp_dsi }, + [DDP_COMPONENT_DSI3] = { MTK_DSI, 3, &ddp_dsi }, + [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, &ddp_gamma }, + [DDP_COMPONENT_OD0] = { MTK_DISP_OD, 0, &ddp_od }, + [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, + [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, &ddp_ovl }, + [DDP_COMPONENT_OVL_2L2] = { MTK_DISP_OVL_2L, 2, &ddp_ovl }, + [DDP_COMPONENT_POSTMASK0] = { MTK_DISP_POSTMASK, 0, &ddp_postmask }, + [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, + [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, + [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, + [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, &ddp_rdma }, + [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, &ddp_rdma }, + [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, &ddp_rdma }, + [DDP_COMPONENT_RDMA4] = { MTK_DISP_RDMA, 4, &ddp_rdma }, + [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe }, + [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL }, + [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; static bool mtk_drm_find_comp_in_ddp(struct device *dev, @@ -475,12 +508,12 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp, type == MTK_DISP_CCORR || type == MTK_DISP_COLOR || type == MTK_DISP_GAMMA || - type == MTK_DPI || - type == MTK_DSI || type == MTK_DISP_OVL || type == MTK_DISP_OVL_2L || type == MTK_DISP_PWM || - type == MTK_DISP_RDMA) + type == MTK_DISP_RDMA || + type == MTK_DPI || + type == MTK_DSI) return 0; priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index bb914d976cf5..4c6a98662305 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -18,22 +18,23 @@ struct mtk_plane_state; struct drm_crtc_state; enum mtk_ddp_comp_type { - MTK_DISP_OVL, - MTK_DISP_OVL_2L, - MTK_DISP_RDMA, - MTK_DISP_WDMA, - MTK_DISP_COLOR, + MTK_DISP_AAL, + MTK_DISP_BLS, MTK_DISP_CCORR, + MTK_DISP_COLOR, MTK_DISP_DITHER, - MTK_DISP_AAL, MTK_DISP_GAMMA, - MTK_DISP_UFOE, - MTK_DSI, - MTK_DPI, - MTK_DISP_PWM, MTK_DISP_MUTEX, MTK_DISP_OD, - MTK_DISP_BLS, + MTK_DISP_OVL, + MTK_DISP_OVL_2L, + MTK_DISP_POSTMASK, + MTK_DISP_PWM, + MTK_DISP_RDMA, + MTK_DISP_UFOE, + MTK_DISP_WDMA, + MTK_DPI, + MTK_DSI, MTK_DDP_COMP_TYPE_MAX, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index aec39724ebeb..56ff8c57ef8f 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -158,6 +158,25 @@ static const enum mtk_ddp_comp_id mt8183_mtk_ddp_ext[] = { DDP_COMPONENT_DPI0, }; +static const enum mtk_ddp_comp_id mt8192_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_CCORR, + DDP_COMPONENT_AAL0, + DDP_COMPONENT_GAMMA, + DDP_COMPONENT_POSTMASK0, + DDP_COMPONENT_DITHER, + DDP_COMPONENT_DSI0, +}; + +static const enum mtk_ddp_comp_id mt8192_mtk_ddp_ext[] = { + DDP_COMPONENT_OVL_2L2, + DDP_COMPONENT_RDMA4, + DDP_COMPONENT_DPI0, +}; + static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .main_path = mt2701_mtk_ddp_main, .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main), @@ -202,6 +221,13 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { .ext_len = 
ARRAY_SIZE(mt8183_mtk_ddp_ext), }; +static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { + .main_path = mt8192_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8192_mtk_ddp_main), + .ext_path = mt8192_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8192_mtk_ddp_ext), +}; + static int mtk_drm_kms_init(struct drm_device *drm) { struct mtk_drm_private *private = drm->dev_private; @@ -397,68 +423,36 @@ static const struct component_master_ops mtk_drm_ops = { }; static const struct of_device_id mtk_ddp_comp_dt_ids[] = { - { .compatible = "mediatek,mt2701-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8167-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8173-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8183-disp-ovl", - .data = (void *)MTK_DISP_OVL }, - { .compatible = "mediatek,mt8183-disp-ovl-2l", - .data = (void *)MTK_DISP_OVL_2L }, - { .compatible = "mediatek,mt2701-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8167-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8183-disp-rdma", - .data = (void *)MTK_DISP_RDMA }, - { .compatible = "mediatek,mt8173-disp-wdma", - .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt8167-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8173-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8183-disp-aal", + .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8192-disp-aal", + .data = (void *)MTK_DISP_AAL}, { .compatible = "mediatek,mt8167-disp-ccorr", .data = (void *)MTK_DISP_CCORR }, { .compatible = "mediatek,mt8183-disp-ccorr", .data = (void *)MTK_DISP_CCORR }, + { .compatible = "mediatek,mt8192-disp-ccorr", + .data = (void *)MTK_DISP_CCORR }, { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8167-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR }, - { .compatible = "mediatek,mt8167-disp-aal", - .data = (void *)MTK_DISP_AAL}, - { .compatible = "mediatek,mt8173-disp-aal", - .data = (void *)MTK_DISP_AAL}, - { .compatible = "mediatek,mt8183-disp-aal", - .data = (void *)MTK_DISP_AAL}, + { .compatible = "mediatek,mt8167-disp-dither", + .data = (void *)MTK_DISP_DITHER }, + { .compatible = "mediatek,mt8183-disp-dither", + .data = (void *)MTK_DISP_DITHER }, { .compatible = "mediatek,mt8167-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8183-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, - { .compatible = "mediatek,mt8167-disp-dither", - .data = (void *)MTK_DISP_DITHER }, - { .compatible = "mediatek,mt8183-disp-dither", - .data = (void *)MTK_DISP_DITHER }, - { .compatible = "mediatek,mt8173-disp-ufoe", - .data = (void *)MTK_DISP_UFOE }, - { .compatible = "mediatek,mt2701-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8167-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8173-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt8183-dsi", - .data = (void *)MTK_DSI }, - { .compatible = "mediatek,mt2701-dpi", - .data = (void *)MTK_DPI }, - { .compatible = "mediatek,mt8173-dpi", - .data = (void *)MTK_DPI }, - { .compatible = "mediatek,mt8183-dpi", - .data = (void *)MTK_DPI }, { .compatible = 
"mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, { .compatible = "mediatek,mt2712-disp-mutex", @@ -469,14 +463,60 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = { .data = (void *)MTK_DISP_MUTEX }, { .compatible = "mediatek,mt8183-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8192-disp-mutex", + .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt8173-disp-od", + .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8167-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8173-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8183-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8192-disp-ovl", + .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt8183-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-ovl-2l", + .data = (void *)MTK_DISP_OVL_2L }, + { .compatible = "mediatek,mt8192-disp-postmask", + .data = (void *)MTK_DISP_POSTMASK }, { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS }, { .compatible = "mediatek,mt8167-disp-pwm", .data = (void *)MTK_DISP_PWM }, { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM }, - { .compatible = "mediatek,mt8173-disp-od", - .data = (void *)MTK_DISP_OD }, + { .compatible = "mediatek,mt2701-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8167-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8183-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8192-disp-rdma", + .data = (void *)MTK_DISP_RDMA }, + { .compatible = "mediatek,mt8173-disp-ufoe", + .data = (void *)MTK_DISP_UFOE }, + { .compatible = "mediatek,mt8173-disp-wdma", + .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt2701-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8167-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt8183-dpi", + .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt2701-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8173-dsi", + .data = (void *)MTK_DSI }, + { .compatible = "mediatek,mt8183-dsi", + .data = (void *)MTK_DSI }, { } }; @@ -493,6 +533,8 @@ static const struct of_device_id mtk_drm_of_ids[] = { .data = &mt8173_mmsys_driver_data}, { .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data}, + { .compatible = "mediatek,mt8192-mmsys", + .data = &mt8192_mmsys_driver_data}, { } }; MODULE_DEVICE_TABLE(of, mtk_drm_of_ids); @@ -568,8 +610,8 @@ static int mtk_drm_probe(struct platform_device *pdev) comp_type == MTK_DISP_OVL || comp_type == MTK_DISP_OVL_2L || comp_type == MTK_DISP_RDMA || - comp_type == MTK_DSI || - comp_type == MTK_DPI) { + comp_type == MTK_DPI || + comp_type == MTK_DSI) { dev_info(dev, "Adding component match for %pOF\n", node); drm_of_component_match_add(dev, &match, compare_of, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 734a1fb052df..075747a6d4aa 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -44,9 +44,10 @@ static void mtk_plane_reset(struct drm_plane *plane) state = kzalloc(sizeof(*state), GFP_KERNEL); if 
(!state) return; - plane->state = &state->base; } + __drm_atomic_helper_plane_reset(plane, &state->base); + state->base.plane = plane; state->pending.format = DRM_FORMAT_RGB565; } diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 93b40c245f00..5d90d2eb0019 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -11,6 +11,7 @@ #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/reset.h> #include <video/mipi_display.h> #include <video/videomode.h> @@ -980,8 +981,10 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) struct mtk_dsi *dsi = dev_get_drvdata(dev); ret = mtk_dsi_encoder_init(drm, dsi); + if (ret) + return ret; - return ret; + return device_reset_optional(dev); } static void mtk_dsi_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig index 9f9281dd49f8..6c70fc3214af 100644 --- a/drivers/gpu/drm/meson/Kconfig +++ b/drivers/gpu/drm/meson/Kconfig @@ -4,11 +4,12 @@ config DRM_MESON depends on DRM && OF && (ARM || ARM64) depends on ARCH_MESON || COMPILE_TEST select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER + select DRM_DISPLAY_CONNECTOR select VIDEOMODE_HELPERS select REGMAP_MMIO select MESON_CANVAS + select CEC_CORE if CEC_NOTIFIER config DRM_MESON_DW_HDMI tristate "HDMI Synopsys Controller support for Amlogic Meson Display" diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile index 28a519cdf66b..3afa31bdc950 100644 --- a/drivers/gpu/drm/meson/Makefile +++ b/drivers/gpu/drm/meson/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only -meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o +meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_encoder_cvbs.o meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o meson-drm-y += meson_rdma.o meson_osd_afbcd.o +meson-drm-y += meson_encoder_hdmi.o obj-$(CONFIG_DRM_MESON) += meson-drm.o obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 7f41a33592c8..80f1d439841a 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -31,7 +31,8 @@ #include "meson_plane.h" #include "meson_osd_afbcd.h" #include "meson_registers.h" -#include "meson_venc_cvbs.h" +#include "meson_encoder_cvbs.h" +#include "meson_encoder_hdmi.h" #include "meson_viu.h" #include "meson_vpp.h" #include "meson_rdma.h" @@ -306,7 +307,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) /* Encoder Initialization */ - ret = meson_venc_cvbs_create(priv); + ret = meson_encoder_cvbs_init(priv); if (ret) goto free_drm; @@ -318,6 +319,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) } } + ret = meson_encoder_hdmi_init(priv); + if (ret) + goto free_drm; + ret = meson_plane_create(priv); if (ret) goto free_drm; @@ -426,46 +431,6 @@ static int compare_of(struct device *dev, void *data) return dev->of_node == data; } -/* Possible connectors nodes to ignore */ -static const struct of_device_id connectors_match[] = { - { .compatible = "composite-video-connector" }, - { .compatible = "svideo-connector" }, - { .compatible = "hdmi-connector" }, - { .compatible = "dvi-connector" }, - {} -}; - -static int meson_probe_remote(struct platform_device *pdev, - struct component_match 
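The small mtk_drm_plane.c hunk above is easy to miss: the old reset only assigned plane->state, whereas __drm_atomic_helper_plane_reset() also fills in the core defaults (opaque alpha, the default pixel_blend_mode, rotation, and so on), which matters once blending properties exist on a plane. The canonical shape for a subclassed plane-state reset, sketched with illustrative my_* names:

```c
#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_fourcc.h>

struct my_plane_state {
	struct drm_plane_state base;
	u32 pending_format;			/* driver-specific extras */
};

#define to_my_plane_state(s) container_of(s, struct my_plane_state, base)

static void my_plane_reset(struct drm_plane *plane)
{
	struct my_plane_state *state;

	if (plane->state) {
		__drm_atomic_helper_plane_destroy_state(plane->state);
		kfree(to_my_plane_state(plane->state));
		plane->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	/* Links plane<->state and initializes the core property defaults. */
	__drm_atomic_helper_plane_reset(plane, &state->base);

	state->pending_format = DRM_FORMAT_RGB565;
}
```

The mtk_dsi.c hunk just above similarly tightens bind error handling and adds a device_reset_optional() so the DSI block starts from a known state.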
**match, - struct device_node *parent, - struct device_node *remote) -{ - struct device_node *ep, *remote_node; - int count = 1; - - /* If node is a connector, return and do not add to match table */ - if (of_match_node(connectors_match, remote)) - return 1; - - component_match_add(&pdev->dev, match, compare_of, remote); - - for_each_endpoint_of_node(remote, ep) { - remote_node = of_graph_get_remote_port_parent(ep); - if (!remote_node || - remote_node == parent || /* Ignore parent endpoint */ - !of_device_is_available(remote_node)) { - of_node_put(remote_node); - continue; - } - - count += meson_probe_remote(pdev, match, remote, remote_node); - - of_node_put(remote_node); - } - - return count; -} - static void meson_drv_shutdown(struct platform_device *pdev) { struct meson_drm *priv = dev_get_drvdata(&pdev->dev); @@ -477,6 +442,13 @@ static void meson_drv_shutdown(struct platform_device *pdev) drm_atomic_helper_shutdown(priv->drm); } +/* Possible connectors nodes to ignore */ +static const struct of_device_id connectors_match[] = { + { .compatible = "composite-video-connector" }, + { .compatible = "svideo-connector" }, + {} +}; + static int meson_drv_probe(struct platform_device *pdev) { struct component_match *match = NULL; @@ -491,8 +463,21 @@ static int meson_drv_probe(struct platform_device *pdev) continue; } - count += meson_probe_remote(pdev, &match, np, remote); + /* If an analog connector is detected, count it as an output */ + if (of_match_node(connectors_match, remote)) { + ++count; + of_node_put(remote); + continue; + } + + dev_dbg(&pdev->dev, "parent %pOF remote match add %pOF parent %s\n", + np, remote, dev_name(&pdev->dev)); + + component_match_add(&pdev->dev, &match, compare_of, remote); + of_node_put(remote); + + ++count; } if (count && !match) diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 0afbd1e70bfc..5cd2b2ebbbd3 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -22,14 +22,11 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> -#include <linux/media-bus-format.h> #include <linux/videodev2.h> #include "meson_drv.h" #include "meson_dw_hdmi.h" #include "meson_registers.h" -#include "meson_vclk.h" -#include "meson_venc.h" #define DRIVER_NAME "meson-dw-hdmi" #define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver" @@ -135,8 +132,6 @@ struct meson_dw_hdmi_data { }; struct meson_dw_hdmi { - struct drm_encoder encoder; - struct drm_bridge bridge; struct dw_hdmi_plat_data dw_plat_data; struct meson_drm *priv; struct device *dev; @@ -148,12 +143,8 @@ struct meson_dw_hdmi { struct regulator *hdmi_supply; u32 irq_stat; struct dw_hdmi *hdmi; - unsigned long output_bus_fmt; + struct drm_bridge *bridge; }; -#define encoder_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, encoder) -#define bridge_to_meson_dw_hdmi(x) \ - container_of(x, struct meson_dw_hdmi, bridge) static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi, const char *compat) @@ -295,14 +286,14 @@ static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi, /* Setup PHY bandwidth modes */ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) + const struct drm_display_mode *mode, + bool mode_is_420) { struct meson_drm *priv = dw_hdmi->priv; unsigned int pixel_clock = mode->clock; /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - pixel_clock /= 2; + if 
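The meson_drv_probe() rewrite above replaces the recursive meson_probe_remote() walk with a flat loop: bridges further down the OF graph are now dealt with by the new meson_encoder_cvbs/meson_encoder_hdmi code through the bridge chain, so probe only needs to inspect the VPU's own endpoints, count analog connector nodes, and component-match everything else. Condensed, with the enclosing endpoint walk restored from the hunk's context lines:

```c
/* Condensed probe loop: inspect only the VPU's own graph endpoints. */
for_each_endpoint_of_node(np, ep) {
	struct device_node *remote = of_graph_get_remote_port_parent(ep);

	if (!remote || !of_device_is_available(remote)) {
		of_node_put(remote);
		continue;
	}

	/* Analog connectors are outputs but not components to bind. */
	if (of_match_node(connectors_match, remote)) {
		count++;
		of_node_put(remote);
		continue;
	}

	component_match_add(&pdev->dev, &match, compare_of, remote);
	of_node_put(remote);
	count++;
}
```

Note that connectors_match now lists only the analog connectors: HDMI and DVI connector nodes are handled through the display-connector bridge instead, which is why the meson Kconfig hunk earlier in this diff gains select DRM_DISPLAY_CONNECTOR.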
(mode_is_420) pixel_clock /= 2; if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) { @@ -374,68 +365,25 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi) mdelay(2); } -static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi, - const struct drm_display_mode *mode) -{ - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - if (!vic) { - meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, - vclk_freq, vclk_freq, vclk_freq, false); - return; - } - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - - DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", - phy_freq, vclk_freq, venc_freq, hdmi_freq, - priv->venc.hdmi_use_enci); - - meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, - venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); -} - static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *display, const struct drm_display_mode *mode) { struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data; + bool is_hdmi2_sink = display->hdmi.scdc.supported; struct meson_drm *priv = dw_hdmi->priv; unsigned int wr_clk = readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING)); + bool mode_is_420 = false; DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name, mode->clock > 340000 ? 40 : 10); + if (drm_mode_is_420_only(display, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display, mode))) + mode_is_420 = true; + /* Enable clocks */ regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); @@ -457,8 +405,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); /* TMDS pattern setup */ - if (mode->clock > 340000 && - dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) { + if (mode->clock > 340000 && !mode_is_420) { dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0); dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23, @@ -476,7 +423,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2); /* Setup PHY parameters */ - meson_hdmi_phy_setup_mode(dw_hdmi, mode); + meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420); /* Setup PHY */ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, @@ -622,214 +569,15 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id) dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected, hpd_connected); - drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); + drm_helper_hpd_irq_event(dw_hdmi->bridge->dev); + drm_bridge_hpd_notify(dw_hdmi->bridge, + hpd_connected ? 
connector_status_connected + : connector_status_disconnected); } return IRQ_HANDLED; } -static enum drm_mode_status -dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, - const struct drm_display_info *display_info, - const struct drm_display_mode *mode) -{ - struct meson_dw_hdmi *dw_hdmi = data; - struct meson_drm *priv = dw_hdmi->priv; - bool is_hdmi2_sink = display_info->hdmi.scdc.supported; - unsigned int phy_freq; - unsigned int vclk_freq; - unsigned int venc_freq; - unsigned int hdmi_freq; - int vic = drm_match_cea_mode(mode); - enum drm_mode_status status; - - DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); - - /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ - if (display_info->max_tmds_clock && - mode->clock > display_info->max_tmds_clock && - !drm_mode_is_420_only(display_info, mode) && - !drm_mode_is_420_also(display_info, mode)) - return MODE_BAD; - - /* Check against non-VIC supported modes */ - if (!vic) { - status = meson_venc_hdmi_supported_mode(mode); - if (status != MODE_OK) - return status; - - return meson_vclk_dmt_supported_freq(priv, mode->clock); - /* Check against supported VIC modes */ - } else if (!meson_venc_hdmi_supported_vic(vic)) - return MODE_BAD; - - vclk_freq = mode->clock; - - /* For 420, pixel clock is half unlike venc clock */ - if (drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - vclk_freq /= 2; - - /* TMDS clock is pixel_clock * 10 */ - phy_freq = vclk_freq * 10; - - /* 480i/576i needs global pixel doubling */ - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - vclk_freq *= 2; - - venc_freq = vclk_freq; - hdmi_freq = vclk_freq; - - /* VENC double pixels for 1080i, 720p and YUV420 modes */ - if (meson_venc_hdmi_venc_repeat(vic) || - drm_mode_is_420_only(display_info, mode) || - (!is_hdmi2_sink && - drm_mode_is_420_also(display_info, mode))) - venc_freq *= 2; - - vclk_freq = max(venc_freq, hdmi_freq); - - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - venc_freq /= 2; - - dev_dbg(dw_hdmi->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", - __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); - - return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); -} - -/* Encoder */ - -static const u32 meson_dw_hdmi_out_bus_fmts[] = { - MEDIA_BUS_FMT_YUV8_1X24, - MEDIA_BUS_FMT_UYYVYY8_0_5X24, -}; - -static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = { - .destroy = meson_venc_hdmi_encoder_destroy, -}; - -static u32 * -meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state, - u32 output_fmt, - unsigned int *num_input_fmts) -{ - u32 *input_fmts = NULL; - int i; - - *num_input_fmts = 0; - - for (i = 0 ; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts) ; ++i) { - if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) { - *num_input_fmts = 1; - input_fmts = kcalloc(*num_input_fmts, - sizeof(*input_fmts), - GFP_KERNEL); - if (!input_fmts) - return NULL; - - input_fmts[0] = output_fmt; - - break; - } - } - - return input_fmts; -} - -static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - - dw_hdmi->output_bus_fmt = 
bridge_state->output_bus_cfg.format; - - DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt); - - return 0; -} - -static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("\n"); - - writel_bits_relaxed(0x3, 0, - priv->io_base + _REG(VPU_HDMI_SETTING)); - - writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); - writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - - DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP"); - - if (priv->venc.hdmi_use_enci) - writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); - else - writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); -} - -static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge, - const struct drm_display_mode *mode, - const struct drm_display_mode *adjusted_mode) -{ - struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge); - struct meson_drm *priv = dw_hdmi->priv; - int vic = drm_match_cea_mode(mode); - unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; - bool yuv420_mode = false; - - DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { - ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; - yuv420_mode = true; - } - - /* VENC + VENC-DVI Mode setup */ - meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); - - /* VCLK Set clock */ - dw_hdmi_set_vclk(dw_hdmi, mode); - - if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) - /* Setup YUV420 to HDMI-TX, no 10bit diphering */ - writel_relaxed(2 | (2 << 2), - priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); - else - /* Setup YUV444 to HDMI-TX, no 10bit diphering */ - writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); -} - -static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = { - .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, - .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts, - .atomic_reset = drm_atomic_helper_bridge_reset, - .atomic_check = meson_venc_hdmi_encoder_atomic_check, - .enable = meson_venc_hdmi_encoder_enable, - .disable = meson_venc_hdmi_encoder_disable, - .mode_set = meson_venc_hdmi_encoder_mode_set, -}; - /* DW HDMI Regmap */ static int meson_dw_hdmi_reg_read(void *context, unsigned int reg, @@ -876,28 +624,6 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = { .dwc_write = dw_hdmi_g12a_dwc_write, }; -static bool meson_hdmi_connector_is_available(struct device *dev) -{ - struct device_node *ep, *remote; - - /* HDMI Connector is on the second port, first endpoint */ - ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, 0); - if (!ep) - return false; - - /* If the endpoint node exists, consider it enabled */ - remote = of_graph_get_remote_port(ep); - if (remote) { - of_node_put(ep); - return true; - } - - of_node_put(ep); - of_node_put(remote); - - return false; -} - static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) { struct meson_drm *priv = meson_dw_hdmi->priv; @@ -976,18 +702,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, struct drm_device *drm = data; struct meson_drm *priv = drm->dev_private; struct 
dw_hdmi_plat_data *dw_plat_data; - struct drm_bridge *next_bridge; - struct drm_encoder *encoder; int irq; int ret; DRM_DEBUG_DRIVER("\n"); - if (!meson_hdmi_connector_is_available(dev)) { - dev_info(drm->dev, "HDMI Output connector not available\n"); - return -ENODEV; - } - match = of_device_get_match_data(&pdev->dev); if (!match) { dev_err(&pdev->dev, "failed to get match data\n"); @@ -1003,7 +722,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, meson_dw_hdmi->dev = dev; meson_dw_hdmi->data = match; dw_plat_data = &meson_dw_hdmi->dw_plat_data; - encoder = &meson_dw_hdmi->encoder; meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi"); if (IS_ERR(meson_dw_hdmi->hdmi_supply)) { @@ -1074,34 +792,18 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, return ret; } - /* Encoder */ - - ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs, - DRM_MODE_ENCODER_TMDS, "meson_hdmi"); - if (ret) { - dev_err(priv->dev, "Failed to init HDMI encoder\n"); - return ret; - } - - meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs; - drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0); - - encoder->possible_crtcs = BIT(0); - meson_dw_hdmi_init(meson_dw_hdmi); - DRM_DEBUG_DRIVER("encoder initialized\n"); - /* Bridge / Connector */ dw_plat_data->priv_data = meson_dw_hdmi; - dw_plat_data->mode_valid = dw_hdmi_mode_valid; dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops; dw_plat_data->phy_name = "meson_dw_hdmi_phy"; dw_plat_data->phy_data = meson_dw_hdmi; dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709; dw_plat_data->ycbcr_420_allowed = true; dw_plat_data->disable_cec = true; + dw_plat_data->output_port = 1; if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") || @@ -1110,15 +812,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, platform_set_drvdata(pdev, meson_dw_hdmi); - meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, - &meson_dw_hdmi->dw_plat_data); + meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, &meson_dw_hdmi->dw_plat_data); if (IS_ERR(meson_dw_hdmi->hdmi)) return PTR_ERR(meson_dw_hdmi->hdmi); - next_bridge = of_drm_find_bridge(pdev->dev.of_node); - if (next_bridge) - drm_bridge_attach(encoder, next_bridge, - &meson_dw_hdmi->bridge, 0); + meson_dw_hdmi->bridge = of_drm_find_bridge(pdev->dev.of_node); DRM_DEBUG_DRIVER("HDMI controller initialized\n"); diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c new file mode 100644 index 000000000000..fd8db97ba8ba --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. + * Copyright (C) 2014 Endless Mobile + * + * Written by: + * Jasper St. 
Pierre <jstpierre@mecheye.net> + */ + +#include <linux/export.h> +#include <linux/of_graph.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "meson_registers.h" +#include "meson_vclk.h" +#include "meson_encoder_cvbs.h" + +/* HHI VDAC Registers */ +#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ +#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */ +#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ +#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */ + +struct meson_encoder_cvbs { + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct meson_drm *priv; +}; + +#define bridge_to_meson_encoder_cvbs(x) \ + container_of(x, struct meson_encoder_cvbs, bridge) + +/* Supported Modes */ + +struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { + { /* PAL */ + .enci = &meson_cvbs_enci_pal, + .mode = { + DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, + 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, + DRM_MODE_FLAG_INTERLACE), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, + }, + }, + { /* NTSC */ + .enci = &meson_cvbs_enci_ntsc, + .mode = { + DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, + 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, + DRM_MODE_FLAG_INTERLACE), + .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, + }, + }, +}; + +static const struct meson_cvbs_mode * +meson_cvbs_get_mode(const struct drm_display_mode *req_mode) +{ + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + if (drm_mode_match(req_mode, &meson_mode->mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_CLOCK | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS)) + return meson_mode; + } + + return NULL; +} + +static int meson_encoder_cvbs_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct meson_encoder_cvbs *meson_encoder_cvbs = + bridge_to_meson_encoder_cvbs(bridge); + + return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge, + &meson_encoder_cvbs->bridge, flags); +} + +static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct meson_encoder_cvbs *meson_encoder_cvbs = + bridge_to_meson_encoder_cvbs(bridge); + struct meson_drm *priv = meson_encoder_cvbs->priv; + struct drm_display_mode *mode; + int i; + + for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { + struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; + + mode = drm_mode_duplicate(priv->drm, &meson_mode->mode); + if (!mode) { + dev_err(priv->dev, "Failed to create a new display mode\n"); + return 0; + } + + drm_mode_probed_add(connector, mode); + } + + return i; +} + +static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) +{ + if (meson_cvbs_get_mode(mode)) + return MODE_OK; + + return MODE_BAD; +} + +static int meson_encoder_cvbs_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + if (meson_cvbs_get_mode(&crtc_state->mode)) + return 0; + + return -EINVAL; +} + +static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) 
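/*
 * (Editor's note, a sketch rather than part of the patch: once the
 * connector is created by drm_bridge_connector, atomic bridge hooks
 * receive only the bridge state, so the enable path below has to
 * recover the adjusted mode through the atomic state instead of a
 * ->mode_set callback:
 *
 *	state      = bridge_state->base.state;
 *	connector  = drm_atomic_get_new_connector_for_encoder(state,
 *							      bridge->encoder);
 *	conn_state = drm_atomic_get_new_connector_state(state, connector);
 *	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
 *	mode       = &crtc_state->adjusted_mode;
 *
 * This replaces the ->mode_set hook of the legacy CVBS encoder that
 * meson_venc_cvbs.c, deleted later in this patch, used to provide.)
 */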
+{ + struct meson_encoder_cvbs *encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + struct meson_drm *priv = encoder_cvbs->priv; + const struct meson_cvbs_mode *meson_mode; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + return; + + meson_mode = meson_cvbs_get_mode(&crtc_state->adjusted_mode); + if (WARN_ON(!meson_mode)) + return; + + meson_venci_cvbs_mode_set(priv, meson_mode->enci); + + /* Setup 27MHz vclk2 for ENCI and VDAC */ + meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, + MESON_VCLK_CVBS, MESON_VCLK_CVBS, + true); + + /* VDAC0 source is not from ATV */ + writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0, + priv->io_base + _REG(VENC_VDAC_DACSEL0)); + + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); + } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); + } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001); + regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); + } +} + +static void meson_encoder_cvbs_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_cvbs *meson_encoder_cvbs = + bridge_to_meson_encoder_cvbs(bridge); + struct meson_drm *priv = meson_encoder_cvbs->priv; + + /* Disable CVBS VDAC */ + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); + } else { + regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); + regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); + } +} + +static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = { + .attach = meson_encoder_cvbs_attach, + .mode_valid = meson_encoder_cvbs_mode_valid, + .get_modes = meson_encoder_cvbs_get_modes, + .atomic_enable = meson_encoder_cvbs_atomic_enable, + .atomic_disable = meson_encoder_cvbs_atomic_disable, + .atomic_check = meson_encoder_cvbs_atomic_check, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +int meson_encoder_cvbs_init(struct meson_drm *priv) +{ + struct drm_device *drm = priv->drm; + struct meson_encoder_cvbs *meson_encoder_cvbs; + struct drm_connector *connector; + struct device_node *remote; + int ret; + + meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL); + if (!meson_encoder_cvbs) + return -ENOMEM; + + /* CVBS Connector Bridge */ + remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0); + if (!remote) { + dev_info(drm->dev, "CVBS Output connector not available\n"); + return 0; + } + + meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote); + if (!meson_encoder_cvbs->next_bridge) { + dev_err(priv->dev, "Failed to find CVBS Connector bridge\n"); + 
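	/*
	 * (Editor's aside, not in the patch: of_drm_find_bridge()
	 * returns NULL until the peer connector driver has called
	 * drm_bridge_add(), so deferring here lets the component
	 * master retry the bind once that driver has probed.
	 * Logging at error level on every defer is noisy; a quieter,
	 * equivalent sketch would be:
	 *
	 *	return dev_err_probe(priv->dev, -EPROBE_DEFER,
	 *			     "Failed to find CVBS Connector bridge\n");
	 *
	 * since dev_err_probe() records the defer reason instead of
	 * printing an error for -EPROBE_DEFER.)
	 */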
return -EPROBE_DEFER; + } + + /* CVBS Encoder Bridge */ + meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs; + meson_encoder_cvbs->bridge.of_node = priv->dev->of_node; + meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite; + meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES; + meson_encoder_cvbs->bridge.interlace_allowed = true; + + drm_bridge_add(&meson_encoder_cvbs->bridge); + + meson_encoder_cvbs->priv = priv; + + /* Encoder */ + ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder, + DRM_MODE_ENCODER_TVDAC); + if (ret) { + dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret); + return ret; + } + + meson_encoder_cvbs->encoder.possible_crtcs = BIT(0); + + /* Attach CVBS Encoder Bridge to Encoder */ + ret = drm_bridge_attach(&meson_encoder_cvbs->encoder, &meson_encoder_cvbs->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); + return ret; + } + + /* Initialize & attach Bridge Connector */ + connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder); + if (IS_ERR(connector)) { + dev_err(priv->dev, "Unable to create CVBS bridge connector\n"); + return PTR_ERR(connector); + } + drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder); + + return 0; +} diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.h b/drivers/gpu/drm/meson/meson_encoder_cvbs.h index ab7f76ba469c..61d9d183ce7f 100644 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.h +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.h @@ -24,6 +24,6 @@ struct meson_cvbs_mode { /* Modes supported by the CVBS output */ extern struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT]; -int meson_venc_cvbs_create(struct meson_drm *priv); +int meson_encoder_cvbs_init(struct meson_drm *priv); #endif /* __MESON_VENC_CVBS_H */ diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c new file mode 100644 index 000000000000..5e306de6f485 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -0,0 +1,447 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2016 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + * Copyright (C) 2015 Amlogic, Inc. All rights reserved. 
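 *
 * (Editor's orientation note, not part of the patch: this new file
 * absorbs the encoder, vclk and mode-validation logic deleted from
 * meson_dw_hdmi.c above, rewritten as a self-contained drm_bridge
 * chained in front of the Synopsys dw-hdmi bridge. With error
 * handling elided, meson_encoder_hdmi_init() below reduces to the
 * standard no-connector bridge bring-up ("enc" is shorthand for the
 * meson_encoder_hdmi instance):
 *
 *	remote = of_graph_get_remote_node(node, 1, 0);
 *	enc->next_bridge = of_drm_find_bridge(remote);
 *	drm_bridge_add(&enc->bridge);
 *	drm_simple_encoder_init(drm, &enc->encoder,
 *				DRM_MODE_ENCODER_TMDS);
 *	drm_bridge_attach(&enc->encoder, &enc->bridge, NULL,
 *			  DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 *	connector = drm_bridge_connector_init(drm, &enc->encoder);
 *	drm_connector_attach_encoder(connector, &enc->encoder);
 *
 * mirroring meson_encoder_cvbs_init() earlier in the patch.)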
+ */ + +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> + +#include <media/cec-notifier.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_edid.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include <linux/media-bus-format.h> +#include <linux/videodev2.h> + +#include "meson_drv.h" +#include "meson_registers.h" +#include "meson_vclk.h" +#include "meson_venc.h" +#include "meson_encoder_hdmi.h" + +struct meson_encoder_hdmi { + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector *connector; + struct meson_drm *priv; + unsigned long output_bus_fmt; + struct cec_notifier *cec_notifier; +}; + +#define bridge_to_meson_encoder_hdmi(x) \ + container_of(x, struct meson_encoder_hdmi, bridge) + +static int meson_encoder_hdmi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + + return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge, + &encoder_hdmi->bridge, flags); +} + +static void meson_encoder_hdmi_detach(struct drm_bridge *bridge) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + + cec_notifier_conn_unregister(encoder_hdmi->cec_notifier); + encoder_hdmi->cec_notifier = NULL; +} + +static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi, + const struct drm_display_mode *mode) +{ + struct meson_drm *priv = encoder_hdmi->priv; + int vic = drm_match_cea_mode(mode); + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + if (!vic) { + meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq, + vclk_freq, vclk_freq, vclk_freq, false); + return; + } + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n", + phy_freq, vclk_freq, venc_freq, hdmi_freq, + priv->venc.hdmi_use_enci); + + meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq, + venc_freq, hdmi_freq, priv->venc.hdmi_use_enci); +} + +static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + bool is_hdmi2_sink = display_info->hdmi.scdc.supported; + unsigned int phy_freq; + unsigned int vclk_freq; + unsigned int venc_freq; + unsigned int hdmi_freq; + int vic = drm_match_cea_mode(mode); + enum 
drm_mode_status status; + + dev_dbg(priv->dev, "Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); + + /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */ + if (display_info->max_tmds_clock && + mode->clock > display_info->max_tmds_clock && + !drm_mode_is_420_only(display_info, mode) && + !drm_mode_is_420_also(display_info, mode)) + return MODE_BAD; + + /* Check against non-VIC supported modes */ + if (!vic) { + status = meson_venc_hdmi_supported_mode(mode); + if (status != MODE_OK) + return status; + + return meson_vclk_dmt_supported_freq(priv, mode->clock); + /* Check against supported VIC modes */ + } else if (!meson_venc_hdmi_supported_vic(vic)) + return MODE_BAD; + + vclk_freq = mode->clock; + + /* For 420, pixel clock is half unlike venc clock */ + if (drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + vclk_freq /= 2; + + /* TMDS clock is pixel_clock * 10 */ + phy_freq = vclk_freq * 10; + + /* 480i/576i needs global pixel doubling */ + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + vclk_freq *= 2; + + venc_freq = vclk_freq; + hdmi_freq = vclk_freq; + + /* VENC double pixels for 1080i, 720p and YUV420 modes */ + if (meson_venc_hdmi_venc_repeat(vic) || + drm_mode_is_420_only(display_info, mode) || + (!is_hdmi2_sink && + drm_mode_is_420_also(display_info, mode))) + venc_freq *= 2; + + vclk_freq = max(venc_freq, hdmi_freq); + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + venc_freq /= 2; + + dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n", + __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq); + + return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq); +} + +static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_atomic_state *state = bridge_state->base.state; + unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR; + struct meson_drm *priv = encoder_hdmi->priv; + struct drm_connector_state *conn_state; + const struct drm_display_mode *mode; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + bool yuv420_mode = false; + int vic; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + return; + + mode = &crtc_state->adjusted_mode; + + vic = drm_match_cea_mode(mode); + + dev_dbg(priv->dev, "\"%s\" vic %d\n", mode->name, vic); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) { + ycrcb_map = VPU_HDMI_OUTPUT_CRYCB; + yuv420_mode = true; + } + + /* VENC + VENC-DVI Mode setup */ + meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode); + + /* VCLK Set clock */ + meson_encoder_hdmi_set_vclk(encoder_hdmi, mode); + + if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) + /* Setup YUV420 to HDMI-TX, no 10bit diphering */ + writel_relaxed(2 | (2 << 2), + priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + else + /* Setup YUV444 to HDMI-TX, no 10bit diphering */ + writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL)); + + dev_dbg(priv->dev, "%s\n", priv->venc.hdmi_use_enci ? 
"VENCI" : "VENCP"); + + if (priv->venc.hdmi_use_enci) + writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN)); + else + writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct meson_drm *priv = encoder_hdmi->priv; + + writel_bits_relaxed(0x3, 0, + priv->io_base + _REG(VPU_HDMI_SETTING)); + + writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN)); + writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN)); +} + +static const u32 meson_encoder_hdmi_out_bus_fmts[] = { + MEDIA_BUS_FMT_YUV8_1X24, + MEDIA_BUS_FMT_UYYVYY8_0_5X24, +}; + +static u32 * +meson_encoder_hdmi_get_inp_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + u32 *input_fmts = NULL; + int i; + + *num_input_fmts = 0; + + for (i = 0 ; i < ARRAY_SIZE(meson_encoder_hdmi_out_bus_fmts) ; ++i) { + if (output_fmt == meson_encoder_hdmi_out_bus_fmts[i]) { + *num_input_fmts = 1; + input_fmts = kcalloc(*num_input_fmts, + sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + input_fmts[0] = output_fmt; + + break; + } + } + + return input_fmts; +} + +static int meson_encoder_hdmi_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(conn_state->state, conn_state->connector); + struct meson_drm *priv = encoder_hdmi->priv; + + encoder_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format; + + dev_dbg(priv->dev, "output_bus_fmt %lx\n", encoder_hdmi->output_bus_fmt); + + if (!drm_connector_atomic_hdr_metadata_equal(old_conn_state, conn_state)) + crtc_state->mode_changed = true; + + return 0; +} + +static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge, + enum drm_connector_status status) +{ + struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); + struct edid *edid; + + if (!encoder_hdmi->cec_notifier) + return; + + if (status == connector_status_connected) { + edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector); + if (!edid) + return; + + cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid); + } else + cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier); +} + +static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = { + .attach = meson_encoder_hdmi_attach, + .detach = meson_encoder_hdmi_detach, + .mode_valid = meson_encoder_hdmi_mode_valid, + .hpd_notify = meson_encoder_hdmi_hpd_notify, + .atomic_enable = meson_encoder_hdmi_atomic_enable, + .atomic_disable = meson_encoder_hdmi_atomic_disable, + .atomic_get_input_bus_fmts = meson_encoder_hdmi_get_inp_bus_fmts, + .atomic_check = meson_encoder_hdmi_atomic_check, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +int meson_encoder_hdmi_init(struct meson_drm *priv) +{ + struct meson_encoder_hdmi *meson_encoder_hdmi; + struct platform_device *pdev; + struct device_node *remote; + int ret; + + 
meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL); + if (!meson_encoder_hdmi) + return -ENOMEM; + + /* HDMI Transceiver Bridge */ + remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0); + if (!remote) { + dev_err(priv->dev, "HDMI transceiver device is disabled"); + return 0; + } + + meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote); + if (!meson_encoder_hdmi->next_bridge) { + dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n"); + return -EPROBE_DEFER; + } + + /* HDMI Encoder Bridge */ + meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs; + meson_encoder_hdmi->bridge.of_node = priv->dev->of_node; + meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA; + meson_encoder_hdmi->bridge.interlace_allowed = true; + + drm_bridge_add(&meson_encoder_hdmi->bridge); + + meson_encoder_hdmi->priv = priv; + + /* Encoder */ + ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder, + DRM_MODE_ENCODER_TMDS); + if (ret) { + dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret); + return ret; + } + + meson_encoder_hdmi->encoder.possible_crtcs = BIT(0); + + /* Attach HDMI Encoder Bridge to Encoder */ + ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) { + dev_err(priv->dev, "Failed to attach bridge: %d\n", ret); + return ret; + } + + /* Initialize & attach Bridge Connector */ + meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm, + &meson_encoder_hdmi->encoder); + if (IS_ERR(meson_encoder_hdmi->connector)) { + dev_err(priv->dev, "Unable to create HDMI bridge connector\n"); + return PTR_ERR(meson_encoder_hdmi->connector); + } + drm_connector_attach_encoder(meson_encoder_hdmi->connector, + &meson_encoder_hdmi->encoder); + + /* + * We should have now in place: + * encoder->[hdmi encoder bridge]->[dw-hdmi bridge]->[display connector bridge]->[display connector] + */ + + /* + * drm_connector_attach_max_bpc_property() requires the + * connector to have a state. 
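 *
 * (Editor's aside: as of this kernel era the helper seeds the initial
 * value through connector->state, so the ordering below is required,
 * roughly:
 *
 *	drm_atomic_helper_connector_reset(connector);
 *	drm_connector_attach_max_bpc_property(connector, 8, 8);
 *
 * The (8, 8) range pins the property to 8 bpc, matching the 8-bit
 * bus formats this encoder advertises.)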
+ */ + drm_atomic_helper_connector_reset(meson_encoder_hdmi->connector); + + if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) || + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) + drm_connector_attach_hdr_output_metadata_property(meson_encoder_hdmi->connector); + + drm_connector_attach_max_bpc_property(meson_encoder_hdmi->connector, 8, 8); + + /* Handle this here until handled by drm_bridge_connector_init() */ + meson_encoder_hdmi->connector->ycbcr_420_allowed = true; + + pdev = of_find_device_by_node(remote); + if (pdev) { + struct cec_connector_info conn_info; + struct cec_notifier *notifier; + + cec_fill_conn_info_from_drm(&conn_info, meson_encoder_hdmi->connector); + + notifier = cec_notifier_conn_register(&pdev->dev, NULL, &conn_info); + if (!notifier) + return -ENOMEM; + + meson_encoder_hdmi->cec_notifier = notifier; + } + + dev_dbg(priv->dev, "HDMI encoder initialized\n"); + + return 0; +} diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.h b/drivers/gpu/drm/meson/meson_encoder_hdmi.h new file mode 100644 index 000000000000..ed19494f0956 --- /dev/null +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021 BayLibre, SAS + * Author: Neil Armstrong <narmstrong@baylibre.com> + */ + +#ifndef __MESON_ENCODER_HDMI_H +#define __MESON_ENCODER_HDMI_H + +int meson_encoder_hdmi_init(struct meson_drm *priv); + +#endif /* __MESON_ENCODER_HDMI_H */ diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c deleted file mode 100644 index f1747fde1fe0..000000000000 --- a/drivers/gpu/drm/meson/meson_venc_cvbs.c +++ /dev/null @@ -1,293 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright (C) 2016 BayLibre, SAS - * Author: Neil Armstrong <narmstrong@baylibre.com> - * Copyright (C) 2015 Amlogic, Inc. All rights reserved. - * Copyright (C) 2014 Endless Mobile - * - * Written by: - * Jasper St. 
Pierre <jstpierre@mecheye.net> - */ - -#include <linux/export.h> -#include <linux/of_graph.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_device.h> -#include <drm/drm_edid.h> -#include <drm/drm_probe_helper.h> -#include <drm/drm_print.h> - -#include "meson_registers.h" -#include "meson_vclk.h" -#include "meson_venc_cvbs.h" - -/* HHI VDAC Registers */ -#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */ -#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */ -#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */ -#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */ - -struct meson_venc_cvbs { - struct drm_encoder encoder; - struct drm_connector connector; - struct meson_drm *priv; -}; -#define encoder_to_meson_venc_cvbs(x) \ - container_of(x, struct meson_venc_cvbs, encoder) - -#define connector_to_meson_venc_cvbs(x) \ - container_of(x, struct meson_venc_cvbs, connector) - -/* Supported Modes */ - -struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = { - { /* PAL */ - .enci = &meson_cvbs_enci_pal, - .mode = { - DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, - 720, 732, 795, 864, 0, 576, 580, 586, 625, 0, - DRM_MODE_FLAG_INTERLACE), - .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, - }, - }, - { /* NTSC */ - .enci = &meson_cvbs_enci_ntsc, - .mode = { - DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, - 720, 739, 801, 858, 0, 480, 488, 494, 525, 0, - DRM_MODE_FLAG_INTERLACE), - .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, - }, - }, -}; - -static const struct meson_cvbs_mode * -meson_cvbs_get_mode(const struct drm_display_mode *req_mode) -{ - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - if (drm_mode_match(req_mode, &meson_mode->mode, - DRM_MODE_MATCH_TIMINGS | - DRM_MODE_MATCH_CLOCK | - DRM_MODE_MATCH_FLAGS | - DRM_MODE_MATCH_3D_FLAGS)) - return meson_mode; - } - - return NULL; -} - -/* Connector */ - -static void meson_cvbs_connector_destroy(struct drm_connector *connector) -{ - drm_connector_cleanup(connector); -} - -static enum drm_connector_status -meson_cvbs_connector_detect(struct drm_connector *connector, bool force) -{ - /* FIXME: Add load-detect or jack-detect if possible */ - return connector_status_connected; -} - -static int meson_cvbs_connector_get_modes(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_display_mode *mode; - int i; - - for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) { - struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i]; - - mode = drm_mode_duplicate(dev, &meson_mode->mode); - if (!mode) { - DRM_ERROR("Failed to create a new display mode\n"); - return 0; - } - - drm_mode_probed_add(connector, mode); - } - - return i; -} - -static int meson_cvbs_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - /* Validate the modes added in get_modes */ - return MODE_OK; -} - -static const struct drm_connector_funcs meson_cvbs_connector_funcs = { - .detect = meson_cvbs_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = meson_cvbs_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const -struct drm_connector_helper_funcs meson_cvbs_connector_helper_funcs = { - .get_modes = meson_cvbs_connector_get_modes, - .mode_valid = 
meson_cvbs_connector_mode_valid, -}; - -/* Encoder */ - -static void meson_venc_cvbs_encoder_destroy(struct drm_encoder *encoder) -{ - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs meson_venc_cvbs_encoder_funcs = { - .destroy = meson_venc_cvbs_encoder_destroy, -}; - -static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - if (meson_cvbs_get_mode(&crtc_state->mode)) - return 0; - - return -EINVAL; -} - -static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder) -{ - struct meson_venc_cvbs *meson_venc_cvbs = - encoder_to_meson_venc_cvbs(encoder); - struct meson_drm *priv = meson_venc_cvbs->priv; - - /* Disable CVBS VDAC */ - if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0); - regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); - } else { - regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8); - } -} - -static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder) -{ - struct meson_venc_cvbs *meson_venc_cvbs = - encoder_to_meson_venc_cvbs(encoder); - struct meson_drm *priv = meson_venc_cvbs->priv; - - /* VDAC0 source is not from ATV */ - writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0, - priv->io_base + _REG(VENC_VDAC_DACSEL0)); - - if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); - } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || - meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001); - regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0); - } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { - regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001); - regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0); - } -} - -static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode); - struct meson_venc_cvbs *meson_venc_cvbs = - encoder_to_meson_venc_cvbs(encoder); - struct meson_drm *priv = meson_venc_cvbs->priv; - - if (meson_mode) { - meson_venci_cvbs_mode_set(priv, meson_mode->enci); - - /* Setup 27MHz vclk2 for ENCI and VDAC */ - meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - MESON_VCLK_CVBS, MESON_VCLK_CVBS, - true); - } -} - -static const struct drm_encoder_helper_funcs - meson_venc_cvbs_encoder_helper_funcs = { - .atomic_check = meson_venc_cvbs_encoder_atomic_check, - .disable = meson_venc_cvbs_encoder_disable, - .enable = meson_venc_cvbs_encoder_enable, - .mode_set = meson_venc_cvbs_encoder_mode_set, -}; - -static bool meson_venc_cvbs_connector_is_available(struct meson_drm *priv) -{ - struct device_node *remote; - - remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0); - if (!remote) - return false; - - of_node_put(remote); - return true; -} - -int meson_venc_cvbs_create(struct meson_drm *priv) -{ - struct drm_device *drm = priv->drm; - struct meson_venc_cvbs *meson_venc_cvbs; - struct drm_connector *connector; - struct drm_encoder *encoder; - int ret; - - if (!meson_venc_cvbs_connector_is_available(priv)) { - dev_info(drm->dev, "CVBS Output connector not available\n"); - return 0; - } - - meson_venc_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_venc_cvbs), - GFP_KERNEL); - if 
(!meson_venc_cvbs) - return -ENOMEM; - - meson_venc_cvbs->priv = priv; - encoder = &meson_venc_cvbs->encoder; - connector = &meson_venc_cvbs->connector; - - /* Connector */ - - drm_connector_helper_add(connector, - &meson_cvbs_connector_helper_funcs); - - ret = drm_connector_init(drm, connector, &meson_cvbs_connector_funcs, - DRM_MODE_CONNECTOR_Composite); - if (ret) { - dev_err(priv->dev, "Failed to init CVBS connector\n"); - return ret; - } - - connector->interlace_allowed = 1; - - /* Encoder */ - - drm_encoder_helper_add(encoder, &meson_venc_cvbs_encoder_helper_funcs); - - ret = drm_encoder_init(drm, encoder, &meson_venc_cvbs_encoder_funcs, - DRM_MODE_ENCODER_TVDAC, "meson_venc_cvbs"); - if (ret) { - dev_err(priv->dev, "Failed to init CVBS encoder\n"); - return ret; - } - - encoder->possible_crtcs = BIT(0); - - drm_connector_attach_encoder(connector, encoder); - - return 0; -} diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c index 4fd4de16cd32..894472921c30 100644 --- a/drivers/gpu/drm/mga/mga_ioc32.c +++ b/drivers/gpu/drm/mga/mga_ioc32.c @@ -38,16 +38,18 @@ typedef struct drm32_mga_init { int func; u32 sarea_priv_offset; - int chipset; - int sgram; - unsigned int maccess; - unsigned int fb_cpp; - unsigned int front_offset, front_pitch; - unsigned int back_offset, back_pitch; - unsigned int depth_cpp; - unsigned int depth_offset, depth_pitch; - unsigned int texture_offset[MGA_NR_TEX_HEAPS]; - unsigned int texture_size[MGA_NR_TEX_HEAPS]; + struct_group(always32bit, + int chipset; + int sgram; + unsigned int maccess; + unsigned int fb_cpp; + unsigned int front_offset, front_pitch; + unsigned int back_offset, back_pitch; + unsigned int depth_cpp; + unsigned int depth_offset, depth_pitch; + unsigned int texture_offset[MGA_NR_TEX_HEAPS]; + unsigned int texture_size[MGA_NR_TEX_HEAPS]; + ); u32 fb_offset; u32 mmio_offset; u32 status_offset; @@ -67,9 +69,8 @@ static int compat_mga_init(struct file *file, unsigned int cmd, init.func = init32.func; init.sarea_priv_offset = init32.sarea_priv_offset; - memcpy(&init.chipset, &init32.chipset, - offsetof(drm_mga_init_t, fb_offset) - - offsetof(drm_mga_init_t, chipset)); + memcpy(&init.always32bit, &init32.always32bit, + sizeof(init32.always32bit)); init.fb_offset = init32.fb_offset; init.mmio_offset = init32.mmio_offset; init.status_offset = init32.status_offset; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 6b9243713b3c..740108a006ba 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -6,7 +6,6 @@ * Dave Airlie */ -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vmalloc.h> @@ -378,7 +377,7 @@ static struct pci_driver mgag200_pci_driver = { static int __init mgag200_init(void) { - if (vgacon_text_force() && mgag200_modeset == -1) + if (drm_firmware_drivers_only() && mgag200_modeset == -1) return -EINVAL; if (mgag200_modeset == 0) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fd98e8bbc550..b983541a4c53 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -847,9 +847,11 @@ static void mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, struct drm_rect *clip, const struct dma_buf_map *map) { + void __iomem *dst = mdev->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ - drm_fb_memcpy_dstclip(mdev->vram, fb->pitches[0], vmap, fb, clip); + 
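	/*
	 * (Editor's note: drm_fb_memcpy_dstclip() computed the
	 * destination clip offset internally; its replacement splits
	 * that job in two. drm_fb_clip_offset() returns the byte
	 * offset of the clip rectangle's top-left pixel for the given
	 * pitch and format, and drm_fb_memcpy_toio() copies only the
	 * damaged region into __iomem VRAM, which is why dst starts
	 * at mdev->vram above and is advanced below.)
	 */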
dst += drm_fb_clip_offset(fb->pitches[0], fb->format, clip); + drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, clip); /* Always scanout image at VRAM offset 0 */ mgag200_set_startadd(mdev, (u32)0); diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 5879f67bc88c..39197b4beea7 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -4,8 +4,8 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST + depends on COMMON_CLK depends on IOMMU_SUPPORT - depends on (OF && COMMON_CLK) || COMPILE_TEST depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n @@ -14,10 +14,12 @@ config DRM_MSM select REGULATOR select DRM_KMS_HELPER select DRM_PANEL + select DRM_BRIDGE + select DRM_PANEL_BRIDGE select DRM_SCHED select SHMEM select TMPFS - select QCOM_SCM if ARCH_QCOM + select QCOM_SCM select WANT_DEV_COREDUMP select SND_SOC_HDMI_CODEC if SND_SOC select SYNC_FILE @@ -55,7 +57,7 @@ config DRM_MSM_GPU_SUDO config DRM_MSM_HDMI_HDCP bool "Enable HDMI HDCP support in MSM DRM driver" - depends on DRM_MSM && QCOM_SCM + depends on DRM_MSM default y help Choose this option to enable HDCP state machine diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 904535eda0c4..093454457545 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -23,8 +23,10 @@ msm-y := \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ + hdmi/hdmi_phy_8996.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ + hdmi/hdmi_pll_8960.o \ edp/edp.o \ edp/edp_aux.o \ edp/edp_bridge.o \ @@ -37,6 +39,7 @@ msm-y := \ disp/mdp4/mdp4_dtv_encoder.o \ disp/mdp4/mdp4_lcdc_encoder.o \ disp/mdp4/mdp4_lvds_connector.o \ + disp/mdp4/mdp4_lvds_pll.o \ disp/mdp4/mdp4_irq.o \ disp/mdp4/mdp4_kms.o \ disp/mdp4/mdp4_plane.o \ @@ -51,7 +54,6 @@ msm-y := \ disp/mdp5/mdp5_mixer.o \ disp/mdp5/mdp5_plane.o \ disp/mdp5/mdp5_smp.o \ - disp/dpu1/dpu_core_irq.o \ disp/dpu1/dpu_core_perf.o \ disp/dpu1/dpu_crtc.o \ disp/dpu1/dpu_encoder.o \ @@ -117,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ dp/dp_audio.o msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o -msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o -msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o -msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 4534633fe7cd..8fb847c174ff 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -571,13 +571,14 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) } icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); - ret = IS_ERR(icc_path); - if (ret) + if (IS_ERR(icc_path)) { + ret = PTR_ERR(icc_path); goto fail; + } ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); - ret = IS_ERR(ocmem_icc_path); - if (ret) { + if (IS_ERR(ocmem_icc_path)) { + ret = PTR_ERR(ocmem_icc_path); /* allow -ENODATA, ocmem icc is optional */ if (ret != -ENODATA) goto fail; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 82bebb40234d..a96ee79cc5e0 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -699,13 +699,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) } icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); - ret = IS_ERR(icc_path); - if (ret) + if (IS_ERR(icc_path)) { + ret = 
PTR_ERR(icc_path); goto fail; + } ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem"); - ret = IS_ERR(ocmem_icc_path); - if (ret) { + if (IS_ERR(ocmem_icc_path)) { + ret = PTR_ERR(ocmem_icc_path); /* allow -ENODATA, ocmem icc is optional */ if (ret != -ENODATA) goto fail; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c index c9d11d57aed6..dd593ec2bc56 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -138,7 +138,7 @@ reset_set(void *data, u64 val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) @@ -154,6 +154,6 @@ void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) ARRAY_SIZE(a5xx_debugfs_list), minor->debugfs_root, minor); - debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev, - &reset_fops); + debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev, + &reset_fops); } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index a7c58018959f..71e52b2b2025 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -296,6 +296,8 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) u32 val; int request, ack; + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return -EINVAL; @@ -337,6 +339,8 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state) { int bit; + WARN_ON_ONCE(!mutex_is_locked(&gmu->lock)); + if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits)) return; @@ -512,11 +516,11 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct platform_device *pdev = to_platform_device(gmu->dev); void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); - void __iomem *seqptr; + void __iomem *seqptr = NULL; uint32_t pdc_address_offset; bool pdc_in_aop = false; - if (!pdcptr) + if (IS_ERR(pdcptr)) goto err; if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu)) @@ -528,7 +532,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu) if (!pdc_in_aop) { seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq"); - if (!seqptr) + if (IS_ERR(seqptr)) goto err; } @@ -887,7 +891,7 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); - if (IS_ERR_OR_NULL(gpu_opp)) + if (IS_ERR(gpu_opp)) return; gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ @@ -901,7 +905,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); - if (IS_ERR_OR_NULL(gpu_opp)) + if (IS_ERR(gpu_opp)) return; dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp); @@ -1482,6 +1486,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (!pdev) return -ENODEV; + mutex_init(&gmu->lock); + gmu->dev = &pdev->dev; of_dma_configure(gmu->dev, node, true); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index 3c74f64e3126..84bd516f01e8 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ 
-44,6 +44,9 @@ struct a6xx_gmu_bo { struct a6xx_gmu { struct device *dev; + /* For serializing communication with the GMU: */ + struct mutex lock; + struct msm_gem_address_space *aspace; void * __iomem mmio; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 40c9fef457a4..78aad5216a61 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, u32 asid; u64 memptr = rbmemptr(ring, ttbr0); - if (ctx == a6xx_gpu->cur_ctx) + if (ctx->seqno == a6xx_gpu->cur_ctx_seqno) return; if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid)) @@ -139,7 +139,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, OUT_PKT7(ring, CP_EVENT_WRITE, 1); OUT_RING(ring, 0x31); - a6xx_gpu->cur_ctx = ctx; + a6xx_gpu->cur_ctx_seqno = ctx->seqno; } static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) @@ -881,7 +881,7 @@ static int a6xx_zap_shader_init(struct msm_gpu *gpu) A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR) -static int a6xx_hw_init(struct msm_gpu *gpu) +static int hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); @@ -1081,7 +1081,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu) /* Always come up on rb 0 */ a6xx_gpu->cur_ring = gpu->rb[0]; - a6xx_gpu->cur_ctx = NULL; + a6xx_gpu->cur_ctx_seqno = 0; /* Enable the SQE_to start the CP engine */ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1); @@ -1135,6 +1135,19 @@ out: return ret; } +static int a6xx_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + int ret; + + mutex_lock(&a6xx_gpu->gmu.lock); + ret = hw_init(gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); + + return ret; +} + static void a6xx_dump(struct msm_gpu *gpu) { DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", @@ -1411,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; - u32 gpu_scid, cntl1_regval = 0; + u32 cntl1_regval = 0; if (IS_ERR(a6xx_gpu->llc_mmio)) return; if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { - gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); + u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); gpu_scid &= 0x1f; cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | (gpu_scid << 15) | (gpu_scid << 20); + + /* On A660, the SCID programming for UCHE traffic is done in + * A6XX_GBIF_SCACHE_CNTL0[14:10] + */ + if (adreno_is_a660_family(adreno_gpu)) + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | + (1 << 8), (gpu_scid << 10) | (1 << 8)); } /* @@ -1458,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) } gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); - - /* On A660, the SCID programming for UCHE traffic is done in - * A6XX_GBIF_SCACHE_CNTL0[14:10] - */ - if (adreno_is_a660_family(adreno_gpu)) - gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | - (1 << 8), (gpu_scid << 10) | (1 << 8)); } static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) @@ -1509,7 +1522,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu) trace_msm_gpu_resume(0); + mutex_lock(&a6xx_gpu->gmu.lock); ret = a6xx_gmu_resume(a6xx_gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); if (ret) return ret; @@ -1532,7 +1547,9 @@ static int 
a6xx_pm_suspend(struct msm_gpu *gpu) msm_devfreq_suspend(gpu); + mutex_lock(&a6xx_gpu->gmu.lock); ret = a6xx_gmu_stop(a6xx_gpu); + mutex_unlock(&a6xx_gpu->gmu.lock); if (ret) return ret; @@ -1547,18 +1564,19 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); - static DEFINE_MUTEX(perfcounter_oob); - mutex_lock(&perfcounter_oob); + mutex_lock(&a6xx_gpu->gmu.lock); /* Force the GPU power on so we can read this register */ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO, - REG_A6XX_CP_ALWAYS_ON_COUNTER_HI); + REG_A6XX_CP_ALWAYS_ON_COUNTER_HI); a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET); - mutex_unlock(&perfcounter_oob); + + mutex_unlock(&a6xx_gpu->gmu.lock); + return 0; } @@ -1622,6 +1640,16 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } +static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); + + mutex_lock(&a6xx_gpu->gmu.lock); + a6xx_gmu_set_freq(gpu, opp); + mutex_unlock(&a6xx_gpu->gmu.lock); +} + static struct msm_gem_address_space * a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev) { @@ -1766,7 +1794,7 @@ static const struct adreno_gpu_funcs funcs = { #endif .gpu_busy = a6xx_gpu_busy, .gpu_get_freq = a6xx_gmu_get_freq, - .gpu_set_freq = a6xx_gmu_set_freq, + .gpu_set_freq = a6xx_gpu_set_freq, #if defined(CONFIG_DRM_MSM_GPU_STATE) .gpu_state_get = a6xx_gpu_state_get, .gpu_state_put = a6xx_gpu_state_put, @@ -1810,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev))) adreno_gpu->base.hw_apriv = true; + /* + * For now only clamp to idle freq for devices where this is known not + * to cause power supply issues: + */ + if (info && (info->revn == 618)) + gpu->clamp_to_idle = true; + a6xx_llc_slices_init(pdev, a6xx_gpu); ret = a6xx_set_supported_hw(&pdev->dev, config->rev); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h index 0bc2d062f54a..8e5527c881b1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h @@ -19,7 +19,16 @@ struct a6xx_gpu { uint64_t sqe_iova; struct msm_ringbuffer *cur_ring; - struct msm_file_private *cur_ctx; + + /** + * cur_ctx_seqno: + * + * The ctx->seqno value of the context with current pgtables + * installed. Tracked by seqno rather than pointer value to + * avoid dangling pointers, and cases where a ctx can be freed + * and a new one created with the same address. 
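 *
 * (Editor's aside, not in the patch: this is the classic stale-handle
 * ("ABA") hazard. The matching check appears in a6xx_set_pagetable()
 * above:
 *
 *	if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
 *		return;
 *	...
 *	a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 *
 * Pointer equality could wrongly skip the pagetable switch when a new
 * context is allocated at a freed context's address; sequence numbers
 * are never reused, so the comparison stays safe.)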
+ */ + int cur_ctx_seqno; struct a6xx_gmu gmu; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index e8f65cd8eca6..6e90209cd543 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -180,7 +180,7 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset, msm_readl((ptr) + ((offset) << 2)) /* read a value from the CX debug bus */ -static int cx_debugbus_read(void *__iomem cxdbg, u32 block, u32 offset, +static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset, u32 *data) { u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) | @@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); a6xx_state->gmu_registers = state_kcalloc(a6xx_state, - 2, sizeof(*a6xx_state->gmu_registers)); + 3, sizeof(*a6xx_state->gmu_registers)); if (!a6xx_state->gmu_registers) return; - a6xx_state->nr_gmu_registers = 2; + a6xx_state->nr_gmu_registers = 3; /* Get the CX GMU registers from AHB */ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0], diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c deleted file mode 100644 index d2457490930b..000000000000 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c +++ /dev/null @@ -1,256 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. - */ - -#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ - -#include <linux/debugfs.h> -#include <linux/irqdomain.h> -#include <linux/irq.h> -#include <linux/kthread.h> - -#include "dpu_core_irq.h" -#include "dpu_trace.h" - -/** - * dpu_core_irq_callback_handler - dispatch core interrupts - * @arg: private data of callback handler - * @irq_idx: interrupt index - */ -static void dpu_core_irq_callback_handler(void *arg, int irq_idx) -{ - struct dpu_kms *dpu_kms = arg; - struct dpu_irq *irq_obj = &dpu_kms->irq_obj; - struct dpu_irq_callback *cb; - - VERB("irq_idx=%d\n", irq_idx); - - if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) - DRM_ERROR("no registered cb, idx:%d\n", irq_idx); - - atomic_inc(&irq_obj->irq_counts[irq_idx]); - - /* - * Perform registered function callback - */ - list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list) - if (cb->func) - cb->func(cb->arg, irq_idx); -} - -u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) -{ - if (!dpu_kms->hw_intr || - !dpu_kms->hw_intr->ops.get_interrupt_status) - return 0; - - if (irq_idx < 0) { - DPU_ERROR("[%pS] invalid irq_idx=%d\n", - __builtin_return_address(0), irq_idx); - return 0; - } - - return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr, - irq_idx, clear); -} - -int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, - struct dpu_irq_callback *register_irq_cb) -{ - unsigned long irq_flags; - - if (!dpu_kms->irq_obj.irq_cb_tbl) { - DPU_ERROR("invalid params\n"); - return -EINVAL; - } - - if (!register_irq_cb || !register_irq_cb->func) { - DPU_ERROR("invalid irq_cb:%d func:%d\n", - register_irq_cb != NULL, - register_irq_cb ? 
- register_irq_cb->func != NULL : -1); - return -EINVAL; - } - - if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { - DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); - return -EINVAL; - } - - VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); - - irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr); - trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb); - list_del_init(&register_irq_cb->list); - list_add_tail(&register_irq_cb->list, - &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]); - if (list_is_first(&register_irq_cb->list, - &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) { - int ret = dpu_kms->hw_intr->ops.enable_irq_locked( - dpu_kms->hw_intr, - irq_idx); - if (ret) - DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n", - irq_idx); - } - dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags); - - return 0; -} - -int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, - struct dpu_irq_callback *register_irq_cb) -{ - unsigned long irq_flags; - - if (!dpu_kms->irq_obj.irq_cb_tbl) { - DPU_ERROR("invalid params\n"); - return -EINVAL; - } - - if (!register_irq_cb || !register_irq_cb->func) { - DPU_ERROR("invalid irq_cb:%d func:%d\n", - register_irq_cb != NULL, - register_irq_cb ? - register_irq_cb->func != NULL : -1); - return -EINVAL; - } - - if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { - DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); - return -EINVAL; - } - - VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); - - irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr); - trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb); - list_del_init(&register_irq_cb->list); - /* empty callback list but interrupt is still enabled */ - if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) { - int ret = dpu_kms->hw_intr->ops.disable_irq_locked( - dpu_kms->hw_intr, - irq_idx); - if (ret) - DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n", - irq_idx); - VERB("irq_idx=%d ret=%d\n", irq_idx, ret); - } - dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags); - - return 0; -} - -static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms) -{ - if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs) - return; - - dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr); -} - -static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) -{ - if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs) - return; - - dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr); -} - -#ifdef CONFIG_DEBUG_FS -static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) -{ - struct dpu_kms *dpu_kms = s->private; - struct dpu_irq *irq_obj = &dpu_kms->irq_obj; - struct dpu_irq_callback *cb; - unsigned long irq_flags; - int i, irq_count, cb_count; - - if (WARN_ON(!irq_obj->irq_cb_tbl)) - return 0; - - for (i = 0; i < irq_obj->total_irqs; i++) { - irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr); - cb_count = 0; - irq_count = atomic_read(&irq_obj->irq_counts[i]); - list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list) - cb_count++; - dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags); - - if (irq_count || cb_count) - seq_printf(s, "idx:%d irq:%d cb:%d\n", - i, irq_count, cb_count); - } - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); - -void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, - struct dentry *parent) -{ - debugfs_create_file("core_irq", 0600, parent, dpu_kms, - &dpu_debugfs_core_irq_fops); -} -#endif - -void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) -{ - int i; - - 
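The deletion above removes dpu_core_irq.c but not its mechanism: the same per-irq_idx callback-list dispatch reappears in dpu_hw_interrupts.c later in this diff. Here is a self-contained sketch of that dispatch shape, using a plain singly-linked list in place of the kernel's struct list_head; all names are hypothetical.

#include <stdio.h>

struct irq_cb {
    struct irq_cb *next;
    void (*func)(void *arg, int irq_idx);
    void *arg;
};

/* Walk every callback registered for one interrupt index. */
static void dispatch(struct irq_cb **cb_tbl, int irq_idx)
{
    for (struct irq_cb *cb = cb_tbl[irq_idx]; cb; cb = cb->next)
        if (cb->func)
            cb->func(cb->arg, irq_idx);
}

static void vblank_cb(void *arg, int irq_idx)
{
    printf("irq %d -> %s\n", irq_idx, (const char *)arg);
}

int main(void)
{
    struct irq_cb cb = { .next = NULL, .func = vblank_cb, .arg = "vblank" };
    struct irq_cb *tbl[4] = { [2] = &cb };

    dispatch(tbl, 2);
    return 0;
}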
pm_runtime_get_sync(&dpu_kms->pdev->dev); - dpu_clear_all_irqs(dpu_kms); - dpu_disable_all_irqs(dpu_kms); - pm_runtime_put_sync(&dpu_kms->pdev->dev); - - /* Create irq callbacks for all possible irq_idx */ - dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs; - dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs, - sizeof(struct list_head), GFP_KERNEL); - dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs, - sizeof(atomic_t), GFP_KERNEL); - for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) { - INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]); - atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0); - } -} - -void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) -{ - int i; - - pm_runtime_get_sync(&dpu_kms->pdev->dev); - for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) - if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i])) - DPU_ERROR("irq_idx=%d still enabled/registered\n", i); - - dpu_clear_all_irqs(dpu_kms); - dpu_disable_all_irqs(dpu_kms); - pm_runtime_put_sync(&dpu_kms->pdev->dev); - - kfree(dpu_kms->irq_obj.irq_cb_tbl); - kfree(dpu_kms->irq_obj.irq_counts); - dpu_kms->irq_obj.irq_cb_tbl = NULL; - dpu_kms->irq_obj.irq_counts = NULL; - dpu_kms->irq_obj.total_irqs = 0; -} - -irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms) -{ - /* - * Dispatch to HW driver to handle interrupt lookup that is being - * fired. When matching interrupt is located, HW driver will call to - * dpu_core_irq_callback_handler with the irq_idx from the lookup table. - * dpu_core_irq_callback_handler will perform the registered function - * callback, and do the interrupt status clearing once the registered - * callback is finished. - * Function will also clear the interrupt status after reading. - */ - dpu_kms->hw_intr->ops.dispatch_irqs( - dpu_kms->hw_intr, - dpu_core_irq_callback_handler, - dpu_kms); - - return IRQ_HANDLED; -} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 768012243b44..967245b8cc02 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> */ @@ -70,17 +70,147 @@ static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc) return NULL; } -static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc) +static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name) { - struct drm_encoder *encoder; + if (!src_name || + !strcmp(src_name, "none")) + return DPU_CRTC_CRC_SOURCE_NONE; + if (!strcmp(src_name, "auto") || + !strcmp(src_name, "lm")) + return DPU_CRTC_CRC_SOURCE_LAYER_MIXER; + + return DPU_CRTC_CRC_SOURCE_INVALID; +} - encoder = get_encoder_from_crtc(crtc); +static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc, + const char *src_name, size_t *values_cnt) +{ + enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); + struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state); + + if (source < 0) { + DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index); + return -EINVAL; + } + + if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) + *values_cnt = crtc_state->num_mixers; + + return 0; +} + +static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) +{ + enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name); + enum dpu_crtc_crc_source current_source; + struct dpu_crtc_state *crtc_state; + struct drm_device *drm_dev = crtc->dev; + struct dpu_crtc_mixer *m; + + bool was_enabled; + bool enable = false; + int i, ret = 0; + + if (source < 0) { + DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index); + return -EINVAL; + } + + ret = drm_modeset_lock(&crtc->mutex, NULL); + + if (ret) + return ret; + + enable = (source != DPU_CRTC_CRC_SOURCE_NONE); + crtc_state = to_dpu_crtc_state(crtc->state); + + spin_lock_irq(&drm_dev->event_lock); + current_source = crtc_state->crc_source; + spin_unlock_irq(&drm_dev->event_lock); + + was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE); + + if (!was_enabled && enable) { + ret = drm_crtc_vblank_get(crtc); + + if (ret) + goto cleanup; + + } else if (was_enabled && !enable) { + drm_crtc_vblank_put(crtc); + } + + spin_lock_irq(&drm_dev->event_lock); + crtc_state->crc_source = source; + spin_unlock_irq(&drm_dev->event_lock); + + crtc_state->crc_frame_skip_count = 0; + + for (i = 0; i < crtc_state->num_mixers; ++i) { + m = &crtc_state->mixers[i]; + + if (!m->hw_lm || !m->hw_lm->ops.setup_misr) + continue; + + /* Calculate MISR over 1 frame */ + m->hw_lm->ops.setup_misr(m->hw_lm, true, 1); + } + + +cleanup: + drm_modeset_unlock(&crtc->mutex); + + return ret; +} + +static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc) +{ + struct drm_encoder *encoder = get_encoder_from_crtc(crtc); if (!encoder) { DRM_ERROR("no encoder found for crtc %d\n", crtc->index); - return false; + return 0; + } + + return dpu_encoder_get_vsync_count(encoder); +} + + +static int dpu_crtc_get_crc(struct drm_crtc *crtc) +{ + struct dpu_crtc_state *crtc_state; + struct dpu_crtc_mixer *m; + u32 crcs[CRTC_DUAL_MIXERS]; + + int i = 0; + int rc = 0; + + crtc_state = to_dpu_crtc_state(crtc->state); + + BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers)); + + /* Skip first 2 frames in case of "uncooked" CRCs */ + if (crtc_state->crc_frame_skip_count < 2) { + crtc_state->crc_frame_skip_count++; + return 0; } - return dpu_encoder_get_frame_count(encoder); + for (i = 0; i < crtc_state->num_mixers; ++i) { + + m = &crtc_state->mixers[i]; + + if (!m->hw_lm || !m->hw_lm->ops.collect_misr) + continue; + + rc = 
m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]); + + if (rc) { + DRM_DEBUG_DRIVER("MISR read failed\n"); + return rc; + } + } + + return drm_crtc_add_crc_entry(crtc, true, + drm_crtc_accurate_vblank_count(crtc), crcs); } static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc, @@ -389,6 +519,9 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc) dpu_crtc->vblank_cb_time = ktime_get(); else dpu_crtc->vblank_cb_count++; + + dpu_crtc_get_crc(crtc); + drm_crtc_handle_vblank(crtc); trace_dpu_crtc_vblank_cb(DRMID(crtc)); } @@ -1332,6 +1465,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = { .atomic_destroy_state = dpu_crtc_destroy_state, .late_register = dpu_crtc_late_register, .early_unregister = dpu_crtc_early_unregister, + .verify_crc_source = dpu_crtc_verify_crc_source, + .set_crc_source = dpu_crtc_set_crc_source, .enable_vblank = msm_crtc_enable_vblank, .disable_vblank = msm_crtc_disable_vblank, .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h index cec3474340e8..ae9546ca1359 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> */ @@ -70,6 +70,19 @@ struct dpu_crtc_smmu_state_data { }; /** + * enum dpu_crtc_crc_source: CRC source + * @DPU_CRTC_CRC_SOURCE_NONE: no source set + * @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer + * @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source + */ +enum dpu_crtc_crc_source { + DPU_CRTC_CRC_SOURCE_NONE = 0, + DPU_CRTC_CRC_SOURCE_LAYER_MIXER, + DPU_CRTC_CRC_SOURCE_MAX, + DPU_CRTC_CRC_SOURCE_INVALID = -1 +}; + +/** * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC * @hw_lm: LM HW Driver context * @lm_ctl: CTL Path HW driver context @@ -139,6 +152,7 @@ struct dpu_crtc_frame_event { * @event_lock : Spinlock around event handling code * @phandle: Pointer to power handler * @cur_perf : current performance committed to clock/bandwidth driver + * @crc_source : CRC source */ struct dpu_crtc { struct drm_crtc base; @@ -210,6 +224,9 @@ struct dpu_crtc_state { u32 num_ctls; struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS]; + + enum dpu_crtc_crc_source crc_source; + int crc_frame_skip_count; }; #define to_dpu_crtc_state(x) \ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e9d3fa1544b..e7ee4cfb8461 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -168,6 +168,7 @@ enum dpu_enc_rc_states { * @vsync_event_work: worker to handle vsync event for autorefresh * @topology: topology of the display * @idle_timeout: idle timeout duration in milliseconds + * @dp: msm_dp pointer, for DP encoders */ struct dpu_encoder_virt { struct drm_encoder base; @@ -206,6 +207,8 @@ struct dpu_encoder_virt { struct msm_display_topology topology; u32 idle_timeout; + + struct msm_dp *dp; }; #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base) @@ -395,19 +398,11 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc, return 0; } -int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc) +int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc) { - struct 
dpu_encoder_virt *dpu_enc; - struct dpu_encoder_phys *phys; - int framecount = 0; - - dpu_enc = to_dpu_encoder_virt(drm_enc); - phys = dpu_enc ? dpu_enc->cur_master : NULL; - - if (phys && phys->ops.get_frame_count) - framecount = phys->ops.get_frame_count(phys); - - return framecount; + struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); + struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL; + return phys ? atomic_read(&phys->vsync_cnt) : 0; } int dpu_encoder_get_linecount(struct drm_encoder *drm_enc) @@ -1000,8 +995,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc, trace_dpu_enc_mode_set(DRMID(drm_enc)); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) - msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) + msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode); list_for_each_entry(conn_iter, connector_list, head) if (conn_iter->encoder == drm_enc) @@ -1182,9 +1177,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc) _dpu_encoder_virt_enable_helper(drm_enc); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { - ret = msm_dp_display_enable(priv->dp, - drm_enc); + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { + ret = msm_dp_display_enable(dpu_enc->dp, drm_enc); if (ret) { DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n", ret); @@ -1224,8 +1218,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) /* wait for idle */ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { - if (msm_dp_display_pre_disable(priv->dp, drm_enc)) + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { + if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc)) DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n"); } @@ -1253,8 +1247,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); - if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) { - if (msm_dp_display_disable(priv->dp, drm_enc)) + if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) { + if (msm_dp_display_disable(dpu_enc->dp, drm_enc)) DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n"); } @@ -2170,7 +2164,8 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, timer_setup(&dpu_enc->vsync_event_timer, dpu_encoder_vsync_event_handler, 0); - + else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS) + dpu_enc->dp = priv->dp[disp_info->h_tile_instance[0]]; INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h index 99a5d73c9b88..e241914a9677 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h @@ -163,9 +163,9 @@ void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc, int dpu_encoder_get_linecount(struct drm_encoder *drm_enc); /** - * dpu_encoder_get_frame_count - get interface frame count for the encoder. + * dpu_encoder_get_vsync_count - get vsync count for the encoder. 
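The rewritten dpu_encoder_get_vsync_count() above reduces to a single atomic read of a counter that the vsync interrupt path increments on the master physical encoder. A standalone C11 sketch of that producer/consumer shape follows; the kernel uses atomic_t rather than stdatomic.h, and the names below are illustrative.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int vsync_cnt;

/* Interrupt-side: one increment per vsync. */
static void vsync_irq(void)
{
    atomic_fetch_add(&vsync_cnt, 1);
}

/* Query-side: a snapshot read, no locking needed. */
static int get_vsync_count(void)
{
    return atomic_load(&vsync_cnt);
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        vsync_irq();
    printf("vsync count: %d\n", get_vsync_count());
    return 0;
}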
* @drm_enc: Pointer to previously created drm encoder structure */ -int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc); +int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc); #endif /* __DPU_ENCODER_H__ */ diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c index aa01698d6b25..34a6940d12c5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c @@ -42,7 +42,7 @@ static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc) { - return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false; + return (phys_enc->split_role != ENC_ROLE_SLAVE); } static bool dpu_encoder_phys_cmd_mode_fixup( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index b131fd376192..ce6f32a919e5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -794,7 +794,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = { DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1), PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk, - DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), + DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1), }; @@ -844,7 +844,7 @@ static const struct dpu_intf_cfg sdm845_intf[] = { }; static const struct dpu_intf_cfg sc7180_intf[] = { - INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25), + INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25), INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27), }; @@ -958,12 +958,6 @@ static const struct dpu_perf_cfg sdm845_perf_data = { .min_core_ib = 2400000, .min_llcc_ib = 800000, .min_dram_ib = 800000, - .core_ib_ff = "6.0", - .core_clk_ff = "1.0", - .comp_ratio_rt = - "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23", - .comp_ratio_nrt = - "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25", .undersized_prefill_lines = 2, .xtra_prefill_lines = 2, .dest_scale_prefill_lines = 3, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h index d2a945a27cfa..4ade44bbd37e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h @@ -676,10 +676,6 @@ struct dpu_perf_cdp_cfg { * @min_core_ib minimum mnoc ib vote in kbps * @min_llcc_ib minimum llcc ib vote in kbps * @min_dram_ib minimum dram ib vote in kbps - * @core_ib_ff core instantaneous bandwidth fudge factor - * @core_clk_ff core clock fudge factor - * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio> - * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio> * @undersized_prefill_lines undersized prefill in lines * @xtra_prefill_lines extra prefill latency in lines * @dest_scale_prefill_lines destination scaler latency in lines @@ -702,10 +698,6 @@ struct dpu_perf_cfg { u32 min_core_ib; u32 min_llcc_ib; u32 min_dram_ib; - const char *core_ib_ff; - const char *core_clk_ff; - const char *comp_ratio_rt; - const char *comp_ratio_nrt; u32 undersized_prefill_lines; u32 xtra_prefill_lines; u32 dest_scale_prefill_lines; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c index 2e816f232e85..d2b6dca487e3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c +++ 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c @@ -3,12 +3,15 @@ */ #include <linux/bitops.h> +#include <linux/debugfs.h> #include <linux/slab.h> +#include "dpu_core_irq.h" #include "dpu_kms.h" #include "dpu_hw_interrupts.h" #include "dpu_hw_util.h" #include "dpu_hw_mdss.h" +#include "dpu_trace.h" /** * Register offsets in MDSS register file for the interrupt registers @@ -117,25 +120,33 @@ static const struct dpu_intr_reg dpu_intr_set[] = { #define DPU_IRQ_REG(irq_idx) (irq_idx / 32) #define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32)) -static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr, - int irq_idx) +/** + * dpu_core_irq_callback_handler - dispatch core interrupts + * @arg: private data of callback handler + * @irq_idx: interrupt index + */ +static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx) { - int reg_idx; + struct dpu_irq_callback *cb; - if (!intr) - return; + VERB("irq_idx=%d\n", irq_idx); - reg_idx = DPU_IRQ_REG(irq_idx); - DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx)); + if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) + DRM_ERROR("no registered cb, idx:%d\n", irq_idx); - /* ensure register writes go through */ - wmb(); + atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]); + + /* + * Perform registered function callback + */ + list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list) + if (cb->func) + cb->func(cb->arg, irq_idx); } -static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, - void (*cbfunc)(void *, int), - void *arg) +irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int reg_idx; int irq_idx; u32 irq_status; @@ -144,13 +155,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, unsigned long irq_flags; if (!intr) - return; + return IRQ_NONE; - /* - * The dispatcher will save the IRQ status before calling here. - * Now need to go through each IRQ status and find matching - * irq lookup index. - */ spin_lock_irqsave(&intr->irq_lock, irq_flags); for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) { if (!test_bit(reg_idx, &intr->irq_mask)) @@ -178,17 +184,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, */ while ((bit = ffs(irq_status)) != 0) { irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1); - /* - * Once a match on irq mask, perform a callback - * to the given cbfunc. cbfunc will take care - * the interrupt status clearing. If cbfunc is - * not provided, then the interrupt clearing - * is here. 
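The retained dispatch loop in the new dpu_core_irq() walks each status register and converts set bits into interrupt indices with ffs(), clearing as it goes. Below is a compilable sketch of that decode, reusing the DPU_IRQ_IDX convention of reg_idx * 32 + offset; the register read and the callback are stubbed out with a printf.

#include <stdio.h>
#include <strings.h>    /* ffs() */

#define IRQ_IDX(reg_idx, offset) ((reg_idx) * 32 + (offset))

static void decode_status(int reg_idx, unsigned int irq_status)
{
    int bit;

    /* ffs() returns the 1-based position of the lowest set bit. */
    while ((bit = ffs(irq_status)) != 0) {
        printf("irq_idx=%d\n", IRQ_IDX(reg_idx, bit - 1));
        irq_status &= ~(1u << (bit - 1));   /* clear and continue */
    }
}

int main(void)
{
    decode_status(1, 0x00000005);   /* fires irq_idx 32 and 34 */
    return 0;
}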
- */ - if (cbfunc) - cbfunc(arg, irq_idx); - dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx); + dpu_core_irq_callback_handler(dpu_kms, irq_idx); /* * When callback finish, clear the irq_status @@ -203,6 +200,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, wmb(); spin_unlock_irqrestore(&intr->irq_lock, irq_flags); + + return IRQ_HANDLED; } static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) @@ -303,12 +302,13 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx) return 0; } -static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr) +static void dpu_clear_irqs(struct dpu_kms *dpu_kms) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int i; if (!intr) - return -EINVAL; + return; for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { if (test_bit(i, &intr->irq_mask)) @@ -318,16 +318,15 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr) /* ensure register writes go through */ wmb(); - - return 0; } -static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr) +static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int i; if (!intr) - return -EINVAL; + return; for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) { if (test_bit(i, &intr->irq_mask)) @@ -337,13 +336,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr) /* ensure register writes go through */ wmb(); - - return 0; } -static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, - int irq_idx, bool clear) +u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear) { + struct dpu_hw_intr *intr = dpu_kms->hw_intr; int reg_idx; unsigned long irq_flags; u32 intr_status; @@ -351,6 +348,12 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, if (!intr) return 0; + if (irq_idx < 0) { + DPU_ERROR("[%pS] invalid irq_idx=%d\n", + __builtin_return_address(0), irq_idx); + return 0; + } + if (irq_idx < 0 || irq_idx >= intr->total_irqs) { pr_err("invalid IRQ index: [%d]\n", irq_idx); return 0; @@ -374,32 +377,6 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, return intr_status; } -static unsigned long dpu_hw_intr_lock(struct dpu_hw_intr *intr) -{ - unsigned long irq_flags; - - spin_lock_irqsave(&intr->irq_lock, irq_flags); - - return irq_flags; -} - -static void dpu_hw_intr_unlock(struct dpu_hw_intr *intr, unsigned long irq_flags) -{ - spin_unlock_irqrestore(&intr->irq_lock, irq_flags); -} - -static void __setup_intr_ops(struct dpu_hw_intr_ops *ops) -{ - ops->enable_irq_locked = dpu_hw_intr_enable_irq_locked; - ops->disable_irq_locked = dpu_hw_intr_disable_irq_locked; - ops->dispatch_irqs = dpu_hw_intr_dispatch_irq; - ops->clear_all_irqs = dpu_hw_intr_clear_irqs; - ops->disable_all_irqs = dpu_hw_intr_disable_irqs; - ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status; - ops->lock = dpu_hw_intr_lock; - ops->unlock = dpu_hw_intr_unlock; -} - static void __intr_offset(struct dpu_mdss_cfg *m, void __iomem *addr, struct dpu_hw_blk_reg_map *hw) { @@ -421,7 +398,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr, return ERR_PTR(-ENOMEM); __intr_offset(m, addr, &intr->hw); - __setup_intr_ops(&intr->ops); intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32; @@ -443,7 +419,168 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr) { if (intr) { kfree(intr->cache_irq_mask); + + kfree(intr->irq_cb_tbl); + kfree(intr->irq_counts); + kfree(intr); } } +int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx, + struct 
dpu_irq_callback *register_irq_cb) +{ + unsigned long irq_flags; + + if (!dpu_kms->hw_intr->irq_cb_tbl) { + DPU_ERROR("invalid params\n"); + return -EINVAL; + } + + if (!register_irq_cb || !register_irq_cb->func) { + DPU_ERROR("invalid irq_cb:%d func:%d\n", + register_irq_cb != NULL, + register_irq_cb ? + register_irq_cb->func != NULL : -1); + return -EINVAL; + } + + if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { + DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); + + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb); + list_del_init(&register_irq_cb->list); + list_add_tail(&register_irq_cb->list, + &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]); + if (list_is_first(&register_irq_cb->list, + &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) { + int ret = dpu_hw_intr_enable_irq_locked( + dpu_kms->hw_intr, + irq_idx); + if (ret) + DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n", + irq_idx); + } + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + return 0; +} + +int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx, + struct dpu_irq_callback *register_irq_cb) +{ + unsigned long irq_flags; + + if (!dpu_kms->hw_intr->irq_cb_tbl) { + DPU_ERROR("invalid params\n"); + return -EINVAL; + } + + if (!register_irq_cb || !register_irq_cb->func) { + DPU_ERROR("invalid irq_cb:%d func:%d\n", + register_irq_cb != NULL, + register_irq_cb ? + register_irq_cb->func != NULL : -1); + return -EINVAL; + } + + if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) { + DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx); + return -EINVAL; + } + + VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx); + + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb); + list_del_init(&register_irq_cb->list); + /* empty callback list but interrupt is still enabled */ + if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) { + int ret = dpu_hw_intr_disable_irq_locked( + dpu_kms->hw_intr, + irq_idx); + if (ret) + DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n", + irq_idx); + VERB("irq_idx=%d ret=%d\n", irq_idx, ret); + } + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v) +{ + struct dpu_kms *dpu_kms = s->private; + struct dpu_irq_callback *cb; + unsigned long irq_flags; + int i, irq_count, cb_count; + + if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl)) + return 0; + + for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { + spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags); + cb_count = 0; + irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]); + list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list) + cb_count++; + spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags); + + if (irq_count || cb_count) + seq_printf(s, "idx:%d irq:%d cb:%d\n", + i, irq_count, cb_count); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq); + +void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms, + struct dentry *parent) +{ + debugfs_create_file("core_irq", 0600, parent, dpu_kms, + &dpu_debugfs_core_irq_fops); +} +#endif + +void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms) +{ + int i; + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + dpu_clear_irqs(dpu_kms); + dpu_disable_all_irqs(dpu_kms); + 
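Note how both re-added functions gate the hardware enable on list membership: the line is enabled only when the first callback for an irq_idx is registered (list_is_first) and disabled again when the last one is removed (list_empty). The same idea reduced to a reference count, with illustrative names:

#include <stdio.h>

struct irq_line {
    int refs;
    int hw_enabled;
};

static void cb_register(struct irq_line *l)
{
    if (l->refs++ == 0)
        l->hw_enabled = 1;  /* first user: enable in hardware */
}

static void cb_unregister(struct irq_line *l)
{
    if (--l->refs == 0)
        l->hw_enabled = 0;  /* last user gone: disable again */
}

int main(void)
{
    struct irq_line l = { 0, 0 };

    cb_register(&l);
    cb_register(&l);
    cb_unregister(&l);
    printf("refs=%d hw_enabled=%d\n", l.refs, l.hw_enabled); /* 1 1 */
    cb_unregister(&l);
    printf("refs=%d hw_enabled=%d\n", l.refs, l.hw_enabled); /* 0 0 */
    return 0;
}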
pm_runtime_put_sync(&dpu_kms->pdev->dev); + + /* Create irq callbacks for all possible irq_idx */ + dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs, + sizeof(struct list_head), GFP_KERNEL); + dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs, + sizeof(atomic_t), GFP_KERNEL); + for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) { + INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]); + atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0); + } +} + +void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms) +{ + int i; + + pm_runtime_get_sync(&dpu_kms->pdev->dev); + for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) + if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i])) + DPU_ERROR("irq_idx=%d still enabled/registered\n", i); + + dpu_clear_irqs(dpu_kms); + dpu_disable_all_irqs(dpu_kms); + pm_runtime_put_sync(&dpu_kms->pdev->dev); +} diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h index ac83c1159815..d50e78c9f148 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h @@ -32,92 +32,6 @@ enum dpu_hw_intr_reg { #define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset) -struct dpu_hw_intr; - -/** - * Interrupt operations. - */ -struct dpu_hw_intr_ops { - - /** - * enable_irq - Enable IRQ based on lookup IRQ index - * @intr: HW interrupt handle - * @irq_idx: Lookup irq index return from irq_idx_lookup - * @return: 0 for success, otherwise failure - */ - int (*enable_irq_locked)( - struct dpu_hw_intr *intr, - int irq_idx); - - /** - * disable_irq - Disable IRQ based on lookup IRQ index - * @intr: HW interrupt handle - * @irq_idx: Lookup irq index return from irq_idx_lookup - * @return: 0 for success, otherwise failure - */ - int (*disable_irq_locked)( - struct dpu_hw_intr *intr, - int irq_idx); - - /** - * clear_all_irqs - Clears all the interrupts (i.e. acknowledges - * any asserted IRQs). Useful during reset. - * @intr: HW interrupt handle - * @return: 0 for success, otherwise failure - */ - int (*clear_all_irqs)( - struct dpu_hw_intr *intr); - - /** - * disable_all_irqs - Disables all the interrupts. Useful during reset. - * @intr: HW interrupt handle - * @return: 0 for success, otherwise failure - */ - int (*disable_all_irqs)( - struct dpu_hw_intr *intr); - - /** - * dispatch_irqs - IRQ dispatcher will call the given callback - * function when a matching interrupt status bit is - * found in the irq mapping table. - * @intr: HW interrupt handle - * @cbfunc: Callback function pointer - * @arg: Argument to pass back during callback - */ - void (*dispatch_irqs)( - struct dpu_hw_intr *intr, - void (*cbfunc)(void *arg, int irq_idx), - void *arg); - - /** - * get_interrupt_status - Gets HW interrupt status, and clear if set, - * based on given lookup IRQ index. 
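The header hunk above drops the dpu_hw_intr_ops vtable entirely; each hook had exactly one implementation, so the callers now use direct calls such as dpu_core_irq_read(). A compact before/after illustration of that simplification, with names invented for the example:

#include <stdio.h>

/* Before: a one-implementation ops table, invoked through a pointer. */
struct intr_ops {
    int (*enable)(int irq_idx);
};

static int enable_impl(int irq_idx)
{
    return irq_idx >= 0 ? 0 : -1;
}

static const struct intr_ops ops = { .enable = enable_impl };

int main(void)
{
    /* After the refactor the indirection is gone: */
    printf("via ops table: %d\n", ops.enable(3));
    printf("direct call:   %d\n", enable_impl(3));
    return 0;
}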
- * @intr: HW interrupt handle - * @irq_idx: Lookup irq index return from irq_idx_lookup - * @clear: True to clear irq after read - */ - u32 (*get_interrupt_status)( - struct dpu_hw_intr *intr, - int irq_idx, - bool clear); - - /** - * lock - take the IRQ lock - * @intr: HW interrupt handle - * @return: irq_flags for the taken spinlock - */ - unsigned long (*lock)( - struct dpu_hw_intr *intr); - - /** - * unlock - take the IRQ lock - * @intr: HW interrupt handle - * @irq_flags: the irq_flags returned from lock - */ - void (*unlock)( - struct dpu_hw_intr *intr, unsigned long irq_flags); -}; - /** * struct dpu_hw_intr: hw interrupts handling data structure * @hw: virtual address mapping @@ -126,15 +40,19 @@ struct dpu_hw_intr_ops { * @save_irq_status: array of IRQ status reg storage created during init * @total_irqs: total number of irq_idx mapped in the hw_interrupts * @irq_lock: spinlock for accessing IRQ resources + * @irq_cb_tbl: array of IRQ callbacks lists + * @irq_counts: array of IRQ counts */ struct dpu_hw_intr { struct dpu_hw_blk_reg_map hw; - struct dpu_hw_intr_ops ops; u32 *cache_irq_mask; u32 *save_irq_status; u32 total_irqs; spinlock_t irq_lock; unsigned long irq_mask; + + struct list_head *irq_cb_tbl; + atomic_t *irq_counts; }; /** diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index cb6bb7a22c15..86363c0ec834 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ #include "dpu_kms.h" @@ -24,6 +25,15 @@ #define LM_BLEND0_FG_ALPHA 0x04 #define LM_BLEND0_BG_ALPHA 0x08 +#define LM_MISR_CTRL 0x310 +#define LM_MISR_SIGNATURE 0x314 +#define LM_MISR_FRAME_COUNT_MASK 0xFF +#define LM_MISR_CTRL_ENABLE BIT(8) +#define LM_MISR_CTRL_STATUS BIT(9) +#define LM_MISR_CTRL_STATUS_CLEAR BIT(10) +#define LM_MISR_CTRL_FREE_RUN_MASK BIT(31) + + static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer, const struct dpu_mdss_cfg *m, void __iomem *addr, @@ -96,6 +106,48 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx, } } +static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 config = 0; + + DPU_REG_WRITE(c, LM_MISR_CTRL, LM_MISR_CTRL_STATUS_CLEAR); + + /* Clear old MISR value (in case it's read before a new value is calculated)*/ + wmb(); + + if (enable) { + config = (frame_count & LM_MISR_FRAME_COUNT_MASK) | + LM_MISR_CTRL_ENABLE | LM_MISR_CTRL_FREE_RUN_MASK; + + DPU_REG_WRITE(c, LM_MISR_CTRL, config); + } else { + DPU_REG_WRITE(c, LM_MISR_CTRL, 0); + } + +} + +static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value) +{ + struct dpu_hw_blk_reg_map *c = &ctx->hw; + u32 ctrl = 0; + + if (!misr_value) + return -EINVAL; + + ctrl = DPU_REG_READ(c, LM_MISR_CTRL); + + if (!(ctrl & LM_MISR_CTRL_ENABLE)) + return -EINVAL; + + if (!(ctrl & LM_MISR_CTRL_STATUS)) + return -EINVAL; + + *misr_value = DPU_REG_READ(c, LM_MISR_SIGNATURE); + + return 0; +} + static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx, u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op) { @@ -158,6 +210,8 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m, ops->setup_blend_config = dpu_hw_lm_setup_blend_config; ops->setup_alpha_out = dpu_hw_lm_setup_color3; 
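Further down, dpu_hw_lm_setup_misr() and dpu_hw_lm_collect_misr() drive a single control word at LM_MISR_CTRL and read LM_MISR_SIGNATURE back once the status bit latches. A small standalone calculation of that control word and the readiness check, with the actual register access stubbed out:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                     (1u << (n))
#define LM_MISR_FRAME_COUNT_MASK   0xFF
#define LM_MISR_CTRL_ENABLE        BIT(8)
#define LM_MISR_CTRL_STATUS        BIT(9)
#define LM_MISR_CTRL_FREE_RUN_MASK BIT(31)

static uint32_t misr_config(uint32_t frame_count)
{
    return (frame_count & LM_MISR_FRAME_COUNT_MASK) |
           LM_MISR_CTRL_ENABLE | LM_MISR_CTRL_FREE_RUN_MASK;
}

/* The signature is only valid once both ENABLE and STATUS are set. */
static int misr_ready(uint32_t ctrl)
{
    return (ctrl & LM_MISR_CTRL_ENABLE) && (ctrl & LM_MISR_CTRL_STATUS);
}

int main(void)
{
    uint32_t cfg = misr_config(1);  /* CRC over a single frame */

    printf("LM_MISR_CTRL <- 0x%08x\n", cfg);  /* 0x80000101 */
    printf("ready before status latch: %d\n", misr_ready(cfg));
    printf("ready after status latch:  %d\n",
           misr_ready(cfg | LM_MISR_CTRL_STATUS));
    return 0;
}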
ops->setup_border_color = dpu_hw_lm_setup_border_color; + ops->setup_misr = dpu_hw_lm_setup_misr; + ops->collect_misr = dpu_hw_lm_collect_misr; } struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index 4a6b2de19ef6..d8052fb2d5da 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ #ifndef _DPU_HW_LM_H @@ -53,6 +54,16 @@ struct dpu_hw_lm_ops { void (*setup_border_color)(struct dpu_hw_mixer *ctx, struct dpu_mdss_color *color, u8 border_en); + + /** + * setup_misr: Enable/disable MISR + */ + void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count); + + /** + * collect_misr: Read MISR signature + */ + int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value); }; struct dpu_hw_mixer { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c index 69eed7932486..f9460672176a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c @@ -138,11 +138,13 @@ static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx, u32 *idx) { int rc = 0; - const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk; + const struct dpu_sspp_sub_blks *sblk; - if (!ctx) + if (!ctx || !ctx->cap || !ctx->cap->sblk) return -EINVAL; + sblk = ctx->cap->sblk; + switch (s_id) { case DPU_SSPP_SRC: *idx = sblk->src_blk.base; @@ -419,7 +421,7 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx, (void)pe; if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp - || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk) + || !scaler3_cfg) return; dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index ff3cffde84cd..6d4911957e33 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
*/ #ifndef _DPU_HW_UTIL_H diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index ae48f41821cf..a15b26428280 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -188,6 +188,7 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) struct dentry *entry; struct drm_device *dev; struct msm_drm_private *priv; + int i; if (!p) return -EINVAL; @@ -203,8 +204,10 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor) dpu_debugfs_vbif_init(dpu_kms, entry); dpu_debugfs_core_irq_init(dpu_kms, entry); - if (priv->dp) - msm_dp_debugfs_init(priv->dp, minor); + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { + if (priv->dp[i]) + msm_dp_debugfs_init(priv->dp[i], minor); + } return dpu_core_perf_debugfs_init(dpu_kms, entry); } @@ -544,35 +547,42 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev, { struct drm_encoder *encoder = NULL; struct msm_display_info info; - int rc = 0; + int rc; + int i; - if (!priv->dp) - return rc; + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { + if (!priv->dp[i]) + continue; - encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); - if (IS_ERR(encoder)) { - DPU_ERROR("encoder init failed for dsi display\n"); - return PTR_ERR(encoder); - } + encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS); + if (IS_ERR(encoder)) { + DPU_ERROR("encoder init failed for dsi display\n"); + return PTR_ERR(encoder); + } - memset(&info, 0, sizeof(info)); - rc = msm_dp_modeset_init(priv->dp, dev, encoder); - if (rc) { - DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); - drm_encoder_cleanup(encoder); - return rc; - } + memset(&info, 0, sizeof(info)); + rc = msm_dp_modeset_init(priv->dp[i], dev, encoder); + if (rc) { + DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc); + drm_encoder_cleanup(encoder); + return rc; + } - priv->encoders[priv->num_encoders++] = encoder; + priv->encoders[priv->num_encoders++] = encoder; - info.num_of_h_tiles = 1; - info.capabilities = MSM_DISPLAY_CAP_VID_MODE; - info.intf_type = encoder->encoder_type; - rc = dpu_encoder_setup(dev, encoder, &info); - if (rc) - DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", - encoder->base.id, rc); - return rc; + info.num_of_h_tiles = 1; + info.h_tile_instance[0] = i; + info.capabilities = MSM_DISPLAY_CAP_VID_MODE; + info.intf_type = encoder->encoder_type; + rc = dpu_encoder_setup(dev, encoder, &info); + if (rc) { + DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n", + encoder->base.id, rc); + return rc; + } + } + + return 0; } /** @@ -792,6 +802,7 @@ static int dpu_irq_postinstall(struct msm_kms *kms) { struct msm_drm_private *priv; struct dpu_kms *dpu_kms = to_dpu_kms(kms); + int i; if (!dpu_kms || !dpu_kms->dev) return -EINVAL; @@ -800,7 +811,8 @@ static int dpu_irq_postinstall(struct msm_kms *kms) if (!priv) return -EINVAL; - msm_dp_irq_postinstall(priv->dp); + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) + msm_dp_irq_postinstall(priv->dp[i]); return 0; } @@ -908,6 +920,10 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) return 0; mmu = msm_iommu_new(dpu_kms->dev->dev, domain); + if (IS_ERR(mmu)) { + iommu_domain_free(domain); + return PTR_ERR(mmu); + } aspace = msm_gem_address_space_create(mmu, "dpu1", 0x1000, 0x100000000 - 0x1000); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 323a6bce9e64..775bcbda860f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ 
b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -78,18 +78,6 @@ struct dpu_irq_callback { void *arg; }; -/** - * struct dpu_irq: IRQ structure contains callback registration info - * @total_irq: total number of irq_idx obtained from HW interrupts mapping - * @irq_cb_tbl: array of IRQ callbacks setting - * @debugfs_file: debugfs file for irq statistics - */ -struct dpu_irq { - u32 total_irqs; - struct list_head *irq_cb_tbl; - atomic_t *irq_counts; -}; - struct dpu_kms { struct msm_kms base; struct drm_device *dev; @@ -104,7 +92,6 @@ struct dpu_kms { struct regulator *venus; struct dpu_hw_intr *hw_intr; - struct dpu_irq irq_obj; struct dpu_core_perf perf; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index c989621209aa..a3e3b9d1b82e 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -1193,7 +1193,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane) if (DPU_FORMAT_IS_YUV(fmt)) _dpu_plane_setup_csc(pdpu); else - pdpu->csc_ptr = 0; + pdpu->csc_ptr = NULL; } _dpu_plane_set_qos_lut(plane, fb); @@ -1330,7 +1330,7 @@ static void dpu_plane_reset(struct drm_plane *plane) /* remove previous state, if present */ if (plane->state) { dpu_plane_destroy_state(plane, plane->state); - plane->state = 0; + plane->state = NULL; } pstate = kzalloc(sizeof(*pstate), GFP_KERNEL); diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index cdcaf470f148..5a33bb148e9e 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -173,12 +173,9 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms) DBG(""); clk_disable_unprepare(mdp4_kms->clk); - if (mdp4_kms->pclk) - clk_disable_unprepare(mdp4_kms->pclk); - if (mdp4_kms->lut_clk) - clk_disable_unprepare(mdp4_kms->lut_clk); - if (mdp4_kms->axi_clk) - clk_disable_unprepare(mdp4_kms->axi_clk); + clk_disable_unprepare(mdp4_kms->pclk); + clk_disable_unprepare(mdp4_kms->lut_clk); + clk_disable_unprepare(mdp4_kms->axi_clk); return 0; } @@ -188,12 +185,9 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms) DBG(""); clk_prepare_enable(mdp4_kms->clk); - if (mdp4_kms->pclk) - clk_prepare_enable(mdp4_kms->pclk); - if (mdp4_kms->lut_clk) - clk_prepare_enable(mdp4_kms->lut_clk); - if (mdp4_kms->axi_clk) - clk_prepare_enable(mdp4_kms->axi_clk); + clk_prepare_enable(mdp4_kms->pclk); + clk_prepare_enable(mdp4_kms->lut_clk); + clk_prepare_enable(mdp4_kms->axi_clk); return 0; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index 9741544ffc35..1bf9ff5dbabc 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c @@ -752,6 +752,94 @@ const struct mdp5_cfg_hw msm8x76_config = { .max_clk = 360000000, }; +static const struct mdp5_cfg_hw msm8x53_config = { + .name = "msm8x53", + .mdp = { + .count = 1, + .caps = MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT, + }, + .ctl = { + .count = 3, + .base = { 0x01000, 0x01200, 0x01400 }, + .flush_hw_mask = 0xffffffff, + }, + .pipe_vig = { + .count = 1, + .base = { 0x04000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SCALE | + MDP_PIPE_CAP_CSC | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_rgb = { + .count = 2, + .base = { 0x14000, 0x16000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_DECIMATION | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_dma = { + .count = 1, + .base = { 0x24000 }, + .caps = 
MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + 0, + }, + .pipe_cursor = { + .count = 1, + .base = { 0x34000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + + .lm = { + .count = 3, + .base = { 0x44000, 0x45000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR }, + { .id = 1, .pp = 1, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY }, + }, + .nb_stages = 5, + .max_width = 2048, + .max_height = 0xFFFF, + }, + .dspp = { + .count = 1, + .base = { 0x54000 }, + + }, + .pp = { + .count = 2, + .base = { 0x70000, 0x70800 }, + }, + .cdm = { + .count = 1, + .base = { 0x79200 }, + }, + .intf = { + .base = { 0x6a000, 0x6a800, 0x6b000 }, + .connect = { + [0] = INTF_DISABLED, + [1] = INTF_DSI, + [2] = INTF_DSI, + }, + }, + .max_clk = 400000000, +}; + static const struct mdp5_cfg_hw msm8917_config = { .name = "msm8917", .mdp = { @@ -1151,6 +1239,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = { { .revision = 7, .config = { .hw = &msm8x96_config } }, { .revision = 11, .config = { .hw = &msm8x76_config } }, { .revision = 15, .config = { .hw = &msm8917_config } }, + { .revision = 16, .config = { .hw = &msm8x53_config } }, }; static const struct mdp5_cfg_handler cfg_handlers_v3[] = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index f482e0911d03..bb7d066618e6 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -1125,6 +1125,20 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); } +static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = mdp5_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = mdp5_crtc_reset, + .atomic_duplicate_state = mdp5_crtc_duplicate_state, + .atomic_destroy_state = mdp5_crtc_destroy_state, + .atomic_print_state = mdp5_crtc_atomic_print_state, + .get_vblank_counter = mdp5_crtc_get_vblank_counter, + .enable_vblank = msm_crtc_enable_vblank, + .disable_vblank = msm_crtc_disable_vblank, + .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, +}; + static const struct drm_crtc_funcs mdp5_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = mdp5_crtc_destroy, @@ -1313,6 +1327,8 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true; drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, + cursor_plane ? 
+ &mdp5_crtc_no_lm_cursor_funcs : &mdp5_crtc_funcs, NULL); drm_flip_work_init(&mdp5_crtc->unref_cursor_work, diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index b3b42672b2d4..7b242246d4e7 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -295,15 +295,12 @@ static int mdp5_disable(struct mdp5_kms *mdp5_kms) mdp5_kms->enable_count--; WARN_ON(mdp5_kms->enable_count < 0); - if (mdp5_kms->tbu_rt_clk) - clk_disable_unprepare(mdp5_kms->tbu_rt_clk); - if (mdp5_kms->tbu_clk) - clk_disable_unprepare(mdp5_kms->tbu_clk); + clk_disable_unprepare(mdp5_kms->tbu_rt_clk); + clk_disable_unprepare(mdp5_kms->tbu_clk); clk_disable_unprepare(mdp5_kms->ahb_clk); clk_disable_unprepare(mdp5_kms->axi_clk); clk_disable_unprepare(mdp5_kms->core_clk); - if (mdp5_kms->lut_clk) - clk_disable_unprepare(mdp5_kms->lut_clk); + clk_disable_unprepare(mdp5_kms->lut_clk); return 0; } @@ -317,12 +314,9 @@ static int mdp5_enable(struct mdp5_kms *mdp5_kms) clk_prepare_enable(mdp5_kms->ahb_clk); clk_prepare_enable(mdp5_kms->axi_clk); clk_prepare_enable(mdp5_kms->core_clk); - if (mdp5_kms->lut_clk) - clk_prepare_enable(mdp5_kms->lut_clk); - if (mdp5_kms->tbu_clk) - clk_prepare_enable(mdp5_kms->tbu_clk); - if (mdp5_kms->tbu_rt_clk) - clk_prepare_enable(mdp5_kms->tbu_rt_clk); + clk_prepare_enable(mdp5_kms->lut_clk); + clk_prepare_enable(mdp5_kms->tbu_clk); + clk_prepare_enable(mdp5_kms->tbu_rt_clk); return 0; } diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c index 2f4895bcb0b0..0ea53420bc40 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c @@ -136,10 +136,8 @@ static int mdp5_mdss_enable(struct msm_mdss *mdss) DBG(""); clk_prepare_enable(mdp5_mdss->ahb_clk); - if (mdp5_mdss->axi_clk) - clk_prepare_enable(mdp5_mdss->axi_clk); - if (mdp5_mdss->vsync_clk) - clk_prepare_enable(mdp5_mdss->vsync_clk); + clk_prepare_enable(mdp5_mdss->axi_clk); + clk_prepare_enable(mdp5_mdss->vsync_clk); return 0; } @@ -149,10 +147,8 @@ static int mdp5_mdss_disable(struct msm_mdss *mdss) struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss); DBG(""); - if (mdp5_mdss->vsync_clk) - clk_disable_unprepare(mdp5_mdss->vsync_clk); - if (mdp5_mdss->axi_clk) - clk_disable_unprepare(mdp5_mdss->axi_clk); + clk_disable_unprepare(mdp5_mdss->vsync_clk); + clk_disable_unprepare(mdp5_mdss->axi_clk); clk_disable_unprepare(mdp5_mdss->ahb_clk); return 0; diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c index cabe15190ec1..2e1acb1bc390 100644 --- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c +++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c @@ -126,8 +126,12 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state) priv = drm_dev->dev_private; kms = priv->kms; - if (priv->dp) - msm_dp_snapshot(disp_state, priv->dp); + for (i = 0; i < ARRAY_SIZE(priv->dp); i++) { + if (!priv->dp[i]) + continue; + + msm_dp_snapshot(disp_state, priv->dp[i]); + } for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { if (!priv->dsi[i]) diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index eb40d8413bca..6d36f63c3338 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -33,6 +33,7 @@ struct dp_aux_private { bool read; bool no_send_addr; bool no_send_stop; + bool initted; u32 offset; u32 segment; @@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct 
drm_dp_aux *dp_aux, } mutex_lock(&aux->mutex); + if (!aux->initted) { + ret = -EIO; + goto exit; + } dp_aux_update_offset_and_segment(aux, msg); dp_aux_transfer_helper(aux, msg, true); @@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } aux->cmd_busy = false; + +exit: mutex_unlock(&aux->mutex); return ret; @@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + dp_catalog_aux_enable(aux->catalog, true); aux->retry_cnt = 0; + aux->initted = true; + + mutex_unlock(&aux->mutex); } void dp_aux_deinit(struct drm_dp_aux *dp_aux) @@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + + aux->initted = false; dp_catalog_aux_enable(aux->catalog, false); + + mutex_unlock(&aux->mutex); } int dp_aux_register(struct drm_dp_aux *dp_aux) diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c index cc2bb8295329..6ae9b29044b6 100644 --- a/drivers/gpu/drm/msm/dp/dp_catalog.c +++ b/drivers/gpu/drm/msm/dp/dp_catalog.c @@ -24,15 +24,6 @@ #define DP_INTERRUPT_STATUS_ACK_SHIFT 1 #define DP_INTERRUPT_STATUS_MASK_SHIFT 2 -#define MSM_DP_CONTROLLER_AHB_OFFSET 0x0000 -#define MSM_DP_CONTROLLER_AHB_SIZE 0x0200 -#define MSM_DP_CONTROLLER_AUX_OFFSET 0x0200 -#define MSM_DP_CONTROLLER_AUX_SIZE 0x0200 -#define MSM_DP_CONTROLLER_LINK_OFFSET 0x0400 -#define MSM_DP_CONTROLLER_LINK_SIZE 0x0C00 -#define MSM_DP_CONTROLLER_P0_OFFSET 0x1000 -#define MSM_DP_CONTROLLER_P0_SIZE 0x0400 - #define DP_INTERRUPT_STATUS1 \ (DP_INTR_AUX_I2C_DONE| \ DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \ @@ -66,82 +57,77 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d { struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + struct dss_io_data *dss = &catalog->io->dp_controller; - msm_disp_snapshot_add_block(disp_state, catalog->io->dp_controller.len, - catalog->io->dp_controller.base, "dp_ctrl"); + msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb"); + msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux"); + msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link"); + msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0"); } static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_AUX_OFFSET; - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.aux.base + offset); } static inline void dp_write_aux(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset += MSM_DP_CONTROLLER_AUX_OFFSET; /* * To make sure aux reg writes happen before any other operation, * this function uses writel() instead of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.aux.base + offset); } static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_AHB_OFFSET; - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.ahb.base + offset); } static inline void dp_write_ahb(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset += MSM_DP_CONTROLLER_AHB_OFFSET; /* * To make sure phy reg writes happen before any other operation, * this 
function uses writel() instead of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.ahb.base + offset); } static inline void dp_write_p0(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset += MSM_DP_CONTROLLER_P0_OFFSET; /* * To make sure interface reg writes happen before any other operation, * this function uses writel() instead of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.p0.base + offset); } static inline u32 dp_read_p0(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_P0_OFFSET; /* * To make sure interface reg writes happen before any other operation, * this function uses writel() instead of writel_relaxed() */ - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.p0.base + offset); } static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset) { - offset += MSM_DP_CONTROLLER_LINK_OFFSET; - return readl_relaxed(catalog->io->dp_controller.base + offset); + return readl_relaxed(catalog->io->dp_controller.link.base + offset); } static inline void dp_write_link(struct dp_catalog_private *catalog, u32 offset, u32 data) { - offset += MSM_DP_CONTROLLER_LINK_OFFSET; /* * To make sure link reg writes happen before any other operation, * this function uses writel() instead of writel_relaxed() */ - writel(data, catalog->io->dp_controller.base + offset); + writel(data, catalog->io->dp_controller.link.base + offset); } /* aux related catalog functions */ @@ -276,29 +262,21 @@ static void dump_regs(void __iomem *base, int len) void dp_catalog_dump_regs(struct dp_catalog *dp_catalog) { - u32 offset, len; struct dp_catalog_private *catalog = container_of(dp_catalog, struct dp_catalog_private, dp_catalog); + struct dss_io_data *io = &catalog->io->dp_controller; pr_info("AHB regs\n"); - offset = MSM_DP_CONTROLLER_AHB_OFFSET; - len = MSM_DP_CONTROLLER_AHB_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->ahb.base, io->ahb.len); pr_info("AUXCLK regs\n"); - offset = MSM_DP_CONTROLLER_AUX_OFFSET; - len = MSM_DP_CONTROLLER_AUX_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->aux.base, io->aux.len); pr_info("LCLK regs\n"); - offset = MSM_DP_CONTROLLER_LINK_OFFSET; - len = MSM_DP_CONTROLLER_LINK_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->link.base, io->link.len); pr_info("P0CLK regs\n"); - offset = MSM_DP_CONTROLLER_P0_OFFSET; - len = MSM_DP_CONTROLLER_P0_SIZE; - dump_regs(catalog->io->dp_controller.base + offset, len); + dump_regs(io->p0.base, io->p0.len); } u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog) @@ -493,8 +471,7 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT; /* Poll for mainlink ready status */ - ret = readx_poll_timeout(readl, catalog->io->dp_controller.base + - MSM_DP_CONTROLLER_LINK_OFFSET + + ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base + REG_DP_MAINLINK_READY, data, data & bit, POLLING_SLEEP_US, POLLING_TIMEOUT_US); @@ -541,8 +518,7 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog) struct dp_catalog_private, dp_catalog); /* Poll for mainlink ready status */ - ret = readl_poll_timeout(catalog->io->dp_controller.base + - MSM_DP_CONTROLLER_LINK_OFFSET + + ret = 
readl_poll_timeout(catalog->io->dp_controller.link.base + REG_DP_MAINLINK_READY, data, data & DP_MAINLINK_READY_FOR_VIDEO, POLLING_SLEEP_US, POLLING_TIMEOUT_US); diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c index 2f6247e80e9d..da4323556ef3 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.c +++ b/drivers/gpu/drm/msm/dp/dp_debug.c @@ -24,240 +24,108 @@ struct dp_debug_private { struct dp_usbpd *usbpd; struct dp_link *link; struct dp_panel *panel; - struct drm_connector **connector; + struct drm_connector *connector; struct device *dev; struct drm_device *drm_dev; struct dp_debug dp_debug; }; -static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len) -{ - if (rc >= *max_size) { - DRM_ERROR("buffer overflow\n"); - return -EINVAL; - } - *len += rc; - *max_size = SZ_4K - *len; - - return 0; -} - -static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff, - size_t count, loff_t *ppos) +static int dp_debug_show(struct seq_file *seq, void *p) { - struct dp_debug_private *debug = file->private_data; - char *buf; - u32 len = 0, rc = 0; + struct dp_debug_private *debug = seq->private; u64 lclk = 0; - u32 max_size = SZ_4K; u32 link_params_rate; - struct drm_display_mode *drm_mode; + const struct drm_display_mode *drm_mode; if (!debug) return -ENODEV; - if (*ppos) - return 0; - - buf = kzalloc(SZ_4K, GFP_KERNEL); - if (!buf) - return -ENOMEM; - drm_mode = &debug->panel->dp_mode.drm_mode; - rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\tdp_panel\n\t\tmax_pclk_khz = %d\n", + seq_printf(seq, "\tname = %s\n", DEBUG_NAME); + seq_printf(seq, "\tdp_panel\n\t\tmax_pclk_khz = %d\n", debug->panel->max_pclk_khz); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\tdrm_dp_link\n\t\trate = %u\n", + seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n", debug->panel->link_info.rate); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tnum_lanes = %u\n", + seq_printf(seq, "\t\tnum_lanes = %u\n", debug->panel->link_info.num_lanes); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tcapabilities = %lu\n", + seq_printf(seq, "\t\tcapabilities = %lu\n", debug->panel->link_info.capabilities); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\tdp_panel_info:\n\t\tactive = %dx%d\n", + seq_printf(seq, "\tdp_panel_info:\n\t\tactive = %dx%d\n", drm_mode->hdisplay, drm_mode->vdisplay); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tback_porch = %dx%d\n", + seq_printf(seq, "\t\tback_porch = %dx%d\n", drm_mode->htotal - drm_mode->hsync_end, drm_mode->vtotal - drm_mode->vsync_end); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tfront_porch = %dx%d\n", + seq_printf(seq, "\t\tfront_porch = %dx%d\n", drm_mode->hsync_start - drm_mode->hdisplay, drm_mode->vsync_start - drm_mode->vdisplay); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tsync_width = %dx%d\n", + seq_printf(seq, "\t\tsync_width = %dx%d\n", drm_mode->hsync_end - drm_mode->hsync_start, drm_mode->vsync_end - 
drm_mode->vsync_start); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tactive_low = %dx%d\n", + seq_printf(seq, "\t\tactive_low = %dx%d\n", debug->panel->dp_mode.h_active_low, debug->panel->dp_mode.v_active_low); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\th_skew = %d\n", + seq_printf(seq, "\t\th_skew = %d\n", drm_mode->hskew); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\trefresh rate = %d\n", + seq_printf(seq, "\t\trefresh rate = %d\n", drm_mode_vrefresh(drm_mode)); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tpixel clock khz = %d\n", + seq_printf(seq, "\t\tpixel clock khz = %d\n", drm_mode->clock); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tbpp = %d\n", + seq_printf(seq, "\t\tbpp = %d\n", debug->panel->dp_mode.bpp); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; /* Link Information */ - rc = snprintf(buf + len, max_size, - "\tdp_link:\n\t\ttest_requested = %d\n", + seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n", debug->link->sink_request); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tnum_lanes = %d\n", + seq_printf(seq, "\t\tnum_lanes = %d\n", debug->link->link_params.num_lanes); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - link_params_rate = debug->link->link_params.rate; - rc = snprintf(buf + len, max_size, - "\t\tbw_code = %d\n", + seq_printf(seq, "\t\tbw_code = %d\n", drm_dp_link_rate_to_bw_code(link_params_rate)); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - lclk = debug->link->link_params.rate * 1000; - rc = snprintf(buf + len, max_size, - "\t\tlclk = %lld\n", lclk); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tv_level = %d\n", + seq_printf(seq, "\t\tlclk = %lld\n", lclk); + seq_printf(seq, "\t\tv_level = %d\n", debug->link->phy_params.v_level); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - rc = snprintf(buf + len, max_size, - "\t\tp_level = %d\n", + seq_printf(seq, "\t\tp_level = %d\n", debug->link->phy_params.p_level); - if (dp_debug_check_buffer_overflow(rc, &max_size, &len)) - goto error; - - if (copy_to_user(user_buff, buf, len)) - goto error; - - *ppos += len; - kfree(buf); - return len; - error: - kfree(buf); - return -EINVAL; + return 0; } +DEFINE_SHOW_ATTRIBUTE(dp_debug); static int dp_test_data_show(struct seq_file *m, void *data) { - struct drm_device *dev; - struct dp_debug_private *debug; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + const struct dp_debug_private *debug = m->private; + const struct drm_connector *connector = debug->connector; u32 bpc; - debug = m->private; - dev = debug->drm_dev; - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) { - bpc = debug->link->test_video.test_bit_depth; - seq_printf(m, "hdisplay: %d\n", - debug->link->test_video.test_h_width); - seq_printf(m, "vdisplay: %d\n", - 
debug->link->test_video.test_v_height); - seq_printf(m, "bpc: %u\n", - dp_link_bit_depth_to_bpc(bpc)); - } else - seq_puts(m, "0"); + if (connector->status == connector_status_connected) { + bpc = debug->link->test_video.test_bit_depth; + seq_printf(m, "hdisplay: %d\n", + debug->link->test_video.test_h_width); + seq_printf(m, "vdisplay: %d\n", + debug->link->test_video.test_v_height); + seq_printf(m, "bpc: %u\n", + dp_link_bit_depth_to_bpc(bpc)); + } else { + seq_puts(m, "0"); } - drm_connector_list_iter_end(&conn_iter); - return 0; } DEFINE_SHOW_ATTRIBUTE(dp_test_data); static int dp_test_type_show(struct seq_file *m, void *data) { - struct dp_debug_private *debug = m->private; - struct drm_device *dev = debug->drm_dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + const struct dp_debug_private *debug = m->private; + const struct drm_connector *connector = debug->connector; - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) - seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN); - else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); + if (connector->status == connector_status_connected) + seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN); + else + seq_puts(m, "0"); return 0; } @@ -269,14 +137,12 @@ static ssize_t dp_test_active_write(struct file *file, { char *input_buffer; int status = 0; - struct dp_debug_private *debug; - struct drm_device *dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + const struct dp_debug_private *debug; + const struct drm_connector *connector; int val = 0; debug = ((struct seq_file *)file->private_data)->private; - dev = debug->drm_dev; + connector = debug->connector; if (len == 0) return 0; @@ -287,30 +153,22 @@ static ssize_t dp_test_active_write(struct file *file, DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) { - status = kstrtoint(input_buffer, 10, &val); - if (status < 0) - break; - DRM_DEBUG_DRIVER("Got %d for test active\n", val); - /* To prevent erroneous activation of the compliance - * testing code, only accept an actual value of 1 here - */ - if (val == 1) - debug->panel->video_test = true; - else - debug->panel->video_test = false; + if (connector->status == connector_status_connected) { + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) { + kfree(input_buffer); + return status; } + DRM_DEBUG_DRIVER("Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 here + */ + if (val == 1) + debug->panel->video_test = true; + else + debug->panel->video_test = false; } - drm_connector_list_iter_end(&conn_iter); kfree(input_buffer); - if (status < 0) - return status; *offp += len; return len; @@ -319,25 +177,16 @@ static ssize_t dp_test_active_write(struct file *file, static int dp_test_active_show(struct seq_file *m, void *data) { struct dp_debug_private *debug = m->private; - struct drm_device *dev = debug->drm_dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - 
drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - if (connector->status == connector_status_connected) { - if (debug->panel->video_test) - seq_puts(m, "1"); - else - seq_puts(m, "0"); - } else + struct drm_connector *connector = debug->connector; + + if (connector->status == connector_status_connected) { + if (debug->panel->video_test) + seq_puts(m, "1"); + else seq_puts(m, "0"); + } else { + seq_puts(m, "0"); } - drm_connector_list_iter_end(&conn_iter); return 0; } @@ -349,11 +198,6 @@ static int dp_test_active_open(struct inode *inode, inode->i_private); } -static const struct file_operations dp_debug_fops = { - .open = simple_open, - .read = dp_debug_read_info, -}; - static const struct file_operations test_active_fops = { .owner = THIS_MODULE, .open = dp_test_active_open, @@ -391,7 +235,7 @@ static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor) struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_usbpd *usbpd, struct dp_link *link, - struct drm_connector **connector, struct drm_minor *minor) + struct drm_connector *connector, struct drm_minor *minor) { int rc = 0; struct dp_debug_private *debug; diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h index 7eaedfbb149c..8c0d0b5178fd 100644 --- a/drivers/gpu/drm/msm/dp/dp_debug.h +++ b/drivers/gpu/drm/msm/dp/dp_debug.h @@ -43,7 +43,7 @@ struct dp_debug { */ struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_usbpd *usbpd, struct dp_link *link, - struct drm_connector **connector, + struct drm_connector *connector, struct drm_minor *minor); /** @@ -60,7 +60,7 @@ void dp_debug_put(struct dp_debug *dp_debug); static inline struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel, struct dp_usbpd *usbpd, struct dp_link *link, - struct drm_connector **connector, struct drm_minor *minor) + struct drm_connector *connector, struct drm_minor *minor) { return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index fbe4c2cd52a3..aba8aa47ed76 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -10,6 +10,7 @@ #include <linux/component.h> #include <linux/of_irq.h> #include <linux/delay.h> +#include <drm/drm_panel.h> #include "msm_drv.h" #include "msm_kms.h" @@ -27,7 +28,6 @@ #include "dp_audio.h" #include "dp_debug.h" -static struct msm_dp *g_dp_display; #define HPD_STRING_SIZE 30 enum { @@ -79,6 +79,8 @@ struct dp_display_private { char *name; int irq; + unsigned int id; + /* state variables */ bool core_initialized; bool hpd_irq_on; @@ -116,11 +118,35 @@ struct dp_display_private { struct dp_audio *audio; }; +struct msm_dp_desc { + phys_addr_t io_start; + unsigned int connector_type; +}; + +struct msm_dp_config { + const struct msm_dp_desc *descs; + size_t num_descs; +}; + +static const struct msm_dp_config sc7180_dp_cfg = { + .descs = (const struct msm_dp_desc[]) { + [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort }, + }, + .num_descs = 1, +}; + static const struct of_device_id dp_dt_match[] = { - {.compatible = "qcom,sc7180-dp"}, + { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg }, {} }; +static struct dp_display_private *dev_get_dp_display_private(struct device *dev) +{ + struct msm_dp *dp = dev_get_drvdata(dev); + + return 
container_of(dp, struct dp_display_private, dp_display); +} + static int dp_add_event(struct dp_display_private *dp_priv, u32 event, u32 data, u32 delay) { @@ -197,25 +223,24 @@ static int dp_display_bind(struct device *dev, struct device *master, void *data) { int rc = 0; - struct dp_display_private *dp; - struct drm_device *drm; + struct dp_display_private *dp = dev_get_dp_display_private(dev); struct msm_drm_private *priv; + struct drm_device *drm; drm = dev_get_drvdata(master); - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); - dp->dp_display.drm_dev = drm; priv = drm->dev_private; - priv->dp = &(dp->dp_display); + priv->dp[dp->id] = &dp->dp_display; - rc = dp->parser->parse(dp->parser); + rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type); if (rc) { DRM_ERROR("device tree parsing failed\n"); goto end; } + dp->dp_display.panel_bridge = dp->parser->panel_bridge; + dp->aux->drm_dev = drm; rc = dp_aux_register(dp->aux); if (rc) { @@ -240,16 +265,13 @@ end: static void dp_display_unbind(struct device *dev, struct device *master, void *data) { - struct dp_display_private *dp; + struct dp_display_private *dp = dev_get_dp_display_private(dev); struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); - dp_power_client_deinit(dp->power); dp_aux_unregister(dp->aux); - priv->dp = NULL; + priv->dp[dp->id] = NULL; } static const struct component_ops dp_display_comp_ops = { @@ -379,38 +401,17 @@ static void dp_display_host_deinit(struct dp_display_private *dp) static int dp_display_usbpd_configure_cb(struct device *dev) { - int rc = 0; - struct dp_display_private *dp; - - if (!dev) { - DRM_ERROR("invalid dev\n"); - rc = -EINVAL; - goto end; - } - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(dev); dp_display_host_init(dp, false); - rc = dp_display_process_hpd_high(dp); -end: - return rc; + return dp_display_process_hpd_high(dp); } static int dp_display_usbpd_disconnect_cb(struct device *dev) { int rc = 0; - struct dp_display_private *dp; - - if (!dev) { - DRM_ERROR("invalid dev\n"); - rc = -EINVAL; - return rc; - } - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(dev); dp_add_event(dp, EV_USER_NOTIFICATION, false, 0); @@ -472,15 +473,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev) { int rc = 0; u32 sink_request; - struct dp_display_private *dp; - - if (!dev) { - DRM_ERROR("invalid dev\n"); - return -EINVAL; - } - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(dev); /* check for any test request issued by sink */ rc = dp_link_process_request(dp->link); @@ -647,7 +640,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data) DRM_DEBUG_DP("hpd_state=%d\n", state); /* signal the disconnect event early to ensure proper teardown */ - dp_display_handle_plugged_change(g_dp_display, false); + dp_display_handle_plugged_change(&dp->dp_display, false); /* enable HDP plug interrupt to prepare for next plugin */ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true); @@ -834,7 +827,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display, return 0; } -static int dp_display_prepare(struct msm_dp *dp) +static int 
dp_display_prepare(struct msm_dp *dp_display) { return 0; } @@ -842,9 +835,7 @@ static int dp_display_prepare(struct msm_dp *dp) static int dp_display_enable(struct dp_display_private *dp, u32 data) { int rc = 0; - struct msm_dp *dp_display; - - dp_display = g_dp_display; + struct msm_dp *dp_display = &dp->dp_display; DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count); if (dp_display->power_on) { @@ -880,9 +871,7 @@ static int dp_display_post_enable(struct msm_dp *dp_display) static int dp_display_disable(struct dp_display_private *dp, u32 data) { - struct msm_dp *dp_display; - - dp_display = g_dp_display; + struct msm_dp *dp_display = &dp->dp_display; if (!dp_display->power_on) return 0; @@ -912,7 +901,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data) return 0; } -static int dp_display_unprepare(struct msm_dp *dp) +static int dp_display_unprepare(struct msm_dp *dp_display) { return 0; } @@ -1213,10 +1202,33 @@ int dp_display_request_irq(struct msm_dp *dp_display) return 0; } +static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev, + unsigned int *id) +{ + const struct msm_dp_config *cfg = of_device_get_match_data(&pdev->dev); + struct resource *res; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return NULL; + + for (i = 0; i < cfg->num_descs; i++) { + if (cfg->descs[i].io_start == res->start) { + *id = i; + return &cfg->descs[i]; + } + } + + dev_err(&pdev->dev, "unknown displayport instance\n"); + return NULL; +} + static int dp_display_probe(struct platform_device *pdev) { int rc = 0; struct dp_display_private *dp; + const struct msm_dp_desc *desc; if (!pdev || !pdev->dev.of_node) { DRM_ERROR("pdev not found\n"); @@ -1227,8 +1239,13 @@ static int dp_display_probe(struct platform_device *pdev) if (!dp) return -ENOMEM; + desc = dp_display_get_desc(pdev, &dp->id); + if (!desc) + return -EINVAL; + dp->pdev = pdev; dp->name = "drm_dp"; + dp->dp_display.connector_type = desc->connector_type; rc = dp_init_sub_modules(dp); if (rc) { @@ -1237,14 +1254,13 @@ static int dp_display_probe(struct platform_device *pdev) } mutex_init(&dp->event_mutex); - g_dp_display = &dp->dp_display; /* Store DP audio handle inside DP display */ - g_dp_display->dp_audio = dp->audio; + dp->dp_display.dp_audio = dp->audio; init_completion(&dp->audio_comp); - platform_set_drvdata(pdev, g_dp_display); + platform_set_drvdata(pdev, &dp->dp_display); rc = component_add(&pdev->dev, &dp_display_comp_ops); if (rc) { @@ -1257,10 +1273,7 @@ static int dp_display_probe(struct platform_device *pdev) static int dp_display_remove(struct platform_device *pdev) { - struct dp_display_private *dp; - - dp = container_of(g_dp_display, - struct dp_display_private, dp_display); + struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev); dp_display_deinit_sub_modules(dp); @@ -1309,14 +1322,14 @@ static int dp_pm_resume(struct device *dev) * can not declared display is connected unless * HDMI cable is plugged in and sink_count of * dongle become 1 + * also only signal audio when disconnected */ - if (dp->link->sink_count) + if (dp->link->sink_count) { dp->dp_display.is_connected = true; - else + } else { dp->dp_display.is_connected = false; - - dp_display_handle_plugged_change(g_dp_display, - dp->dp_display.is_connected); + dp_display_handle_plugged_change(dp_display, false); + } DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n", dp->link->sink_count, dp->dp_display.is_connected, @@ -1429,7 +1442,7 @@ void 
msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) dev = &dp->pdev->dev; dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd, - dp->link, &dp->dp_display.connector, + dp->link, dp->dp_display.connector, minor); if (IS_ERR(dp->debug)) { rc = PTR_ERR(dp->debug); diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 8b47cdabb67e..8e80e3bac394 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -15,9 +15,11 @@ struct msm_dp { struct device *codec_dev; struct drm_connector *connector; struct drm_encoder *encoder; + struct drm_bridge *panel_bridge; bool is_connected; bool audio_enabled; bool power_on; + unsigned int connector_type; hdmi_codec_plugged_cb plugged_cb; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 764f4b81017e..76856c4ee1d6 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -5,6 +5,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include "msm_drv.h" @@ -147,7 +148,7 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display) ret = drm_connector_init(dp_display->drm_dev, connector, &dp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); + dp_display->connector_type); if (ret) return ERR_PTR(ret); @@ -160,5 +161,15 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display) drm_connector_attach_encoder(connector, dp_display->encoder); + if (dp_display->panel_bridge) { + ret = drm_bridge_attach(dp_display->encoder, + dp_display->panel_bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret < 0) { + DRM_ERROR("failed to attach panel bridge: %d\n", ret); + return ERR_PTR(ret); + } + } + return connector; } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index 2181b60e1d1d..71db10c0f262 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -234,7 +234,7 @@ u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_edid_bpp, u32 mode_pclk_khz) { struct dp_panel_private *panel; - u32 bpp = mode_edid_bpp; + u32 bpp; if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) { DRM_ERROR("invalid input\n"); diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c index 0519dd3ac3c3..a7acc23f742b 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.c +++ b/drivers/gpu/drm/msm/dp/dp_parser.c @@ -6,11 +6,22 @@ #include <linux/of_gpio.h> #include <linux/phy/phy.h> +#include <drm/drm_of.h> #include <drm/drm_print.h> +#include <drm/drm_bridge.h> #include "dp_parser.h" #include "dp_reg.h" +#define DP_DEFAULT_AHB_OFFSET 0x0000 +#define DP_DEFAULT_AHB_SIZE 0x0200 +#define DP_DEFAULT_AUX_OFFSET 0x0200 +#define DP_DEFAULT_AUX_SIZE 0x0200 +#define DP_DEFAULT_LINK_OFFSET 0x0400 +#define DP_DEFAULT_LINK_SIZE 0x0C00 +#define DP_DEFAULT_P0_OFFSET 0x1000 +#define DP_DEFAULT_P0_SIZE 0x0400 + static const struct dp_regulator_cfg sdm845_dp_reg_cfg = { .num = 2, .regs = { @@ -19,67 +30,73 @@ static const struct dp_regulator_cfg sdm845_dp_reg_cfg = { }, }; -static int msm_dss_ioremap(struct platform_device *pdev, - struct dss_io_data *io_data) -{ - struct resource *res = NULL; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("%pS->%s: msm_dss_get_res failed\n", - __builtin_return_address(0), __func__); - return -ENODEV; - } - - io_data->len = (u32)resource_size(res); - io_data->base = ioremap(res->start, io_data->len); 
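/*
 * Illustration, not part of this commit: the msm_dss_ioremap()/
 * msm_dss_iounmap() pair being removed here is replaced by dp_ioremap()
 * below, built on devm_platform_get_and_ioremap_resource(), which looks
 * up the memory resource and maps it in a single call, and unmaps it
 * automatically on driver detach. A minimal sketch of that pattern,
 * with invented names:
 *
 *	static void __iomem *example_map(struct platform_device *pdev,
 *					 unsigned int idx, size_t *len)
 *	{
 *		struct resource *res;
 *		void __iomem *base;
 *
 *		base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
 *		if (IS_ERR(base))
 *			return base;
 *		*len = resource_size(res);
 *		return base;
 *	}
 *
 * The caller tests the result with IS_ERR()/PTR_ERR(); no iounmap() or
 * error-path cleanup is needed.
 */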
- if (!io_data->base) { - DRM_ERROR("%pS->%s: ioremap failed\n", - __builtin_return_address(0), __func__); - return -EIO; - } - - return 0; -} - -static void msm_dss_iounmap(struct dss_io_data *io_data) +static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len) { - if (io_data->base) { - iounmap(io_data->base); - io_data->base = NULL; - } - io_data->len = 0; -} + struct resource *res; + void __iomem *base; -static void dp_parser_unmap_io_resources(struct dp_parser *parser) -{ - struct dp_io *io = &parser->io; + base = devm_platform_get_and_ioremap_resource(pdev, idx, &res); + if (!IS_ERR(base)) + *len = resource_size(res); - msm_dss_iounmap(&io->dp_controller); + return base; } static int dp_parser_ctrl_res(struct dp_parser *parser) { - int rc = 0; struct platform_device *pdev = parser->pdev; struct dp_io *io = &parser->io; + struct dss_io_data *dss = &io->dp_controller; + + dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len); + if (IS_ERR(dss->ahb.base)) + return PTR_ERR(dss->ahb.base); + + dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len); + if (IS_ERR(dss->aux.base)) { + /* + * The initial binding had a single reg, but in order to + * support variation in the sub-region sizes this was split. + * dp_ioremap() will fail with -EINVAL here if only a single + * reg is specified, so fill in the sub-region offsets and + * lengths based on this single region. + */ + if (PTR_ERR(dss->aux.base) == -EINVAL) { + if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) { + DRM_ERROR("legacy memory region not large enough\n"); + return -EINVAL; + } + + dss->ahb.len = DP_DEFAULT_AHB_SIZE; + dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET; + dss->aux.len = DP_DEFAULT_AUX_SIZE; + dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET; + dss->link.len = DP_DEFAULT_LINK_SIZE; + dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET; + dss->p0.len = DP_DEFAULT_P0_SIZE; + } else { + DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base); + return PTR_ERR(dss->aux.base); + } + } else { + dss->link.base = dp_ioremap(pdev, 2, &dss->link.len); + if (IS_ERR(dss->link.base)) { + DRM_ERROR("unable to remap link region: %pe\n", dss->link.base); + return PTR_ERR(dss->link.base); + } - rc = msm_dss_ioremap(pdev, &io->dp_controller); - if (rc) { - DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc); - goto err; + dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len); + if (IS_ERR(dss->p0.base)) { + DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base); + return PTR_ERR(dss->p0.base); + } } io->phy = devm_phy_get(&pdev->dev, "dp"); - if (IS_ERR(io->phy)) { - rc = PTR_ERR(io->phy); - goto err; - } + if (IS_ERR(io->phy)) + return PTR_ERR(io->phy); return 0; -err: - dp_parser_unmap_io_resources(parser); - return rc; } static int dp_parser_misc(struct dp_parser *parser) @@ -248,7 +265,28 @@ static int dp_parser_clock(struct dp_parser *parser) return 0; } -static int dp_parser_parse(struct dp_parser *parser) +static int dp_parser_find_panel(struct dp_parser *parser) +{ + struct device *dev = &parser->pdev->dev; + struct drm_panel *panel; + int rc; + + rc = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL); + if (rc) { + DRM_ERROR("failed to acquire DRM panel: %d\n", rc); + return rc; + } + + parser->panel_bridge = devm_drm_panel_bridge_add(dev, panel); + if (IS_ERR(parser->panel_bridge)) { + DRM_ERROR("failed to create panel bridge\n"); + return PTR_ERR(parser->panel_bridge); + } + + return 0; +} + +static int dp_parser_parse(struct dp_parser *parser, int 
connector_type) { int rc = 0; @@ -269,6 +307,12 @@ static int dp_parser_parse(struct dp_parser *parser) if (rc) return rc; + if (connector_type == DRM_MODE_CONNECTOR_eDP) { + rc = dp_parser_find_panel(parser); + if (rc) + return rc; + } + /* Map the corresponding regulator information according to * version. Currently, since we only have one supported platform, * mapping the regulator directly. diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h index 34b49628bbaf..3172da089421 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.h +++ b/drivers/gpu/drm/msm/dp/dp_parser.h @@ -25,11 +25,18 @@ enum dp_pm_type { DP_MAX_PM }; -struct dss_io_data { - u32 len; +struct dss_io_region { + size_t len; void __iomem *base; }; +struct dss_io_data { + struct dss_io_region ahb; + struct dss_io_region aux; + struct dss_io_region link; + struct dss_io_region p0; +}; + static inline const char *dp_parser_pm_name(enum dp_pm_type module) { switch (module) { @@ -116,8 +123,9 @@ struct dp_parser { struct dp_display_data disp_data; const struct dp_regulator_cfg *regulator_cfg; u32 max_dp_lanes; + struct drm_bridge *panel_bridge; - int (*parse)(struct dp_parser *parser); + int (*parse)(struct dp_parser *parser, int connector_type); }; /** diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 614dc7f26f2c..5cd230a5d5d3 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -112,18 +112,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; - struct platform_device *pdev = to_platform_device(dev); - struct msm_dsi *msm_dsi; - - DBG(""); - msm_dsi = dsi_init(pdev); - if (IS_ERR(msm_dsi)) { - /* Don't fail the bind if the dsi port is not connected */ - if (PTR_ERR(msm_dsi) == -ENODEV) - return 0; - else - return PTR_ERR(msm_dsi); - } + struct msm_dsi *msm_dsi = dev_get_drvdata(dev); priv->dsi[msm_dsi->id] = msm_dsi; @@ -136,12 +125,8 @@ static void dsi_unbind(struct device *dev, struct device *master, struct drm_device *drm = dev_get_drvdata(master); struct msm_drm_private *priv = drm->dev_private; struct msm_dsi *msm_dsi = dev_get_drvdata(dev); - int id = msm_dsi->id; - if (priv->dsi[id]) { - dsi_destroy(msm_dsi); - priv->dsi[id] = NULL; - } + priv->dsi[msm_dsi->id] = NULL; } static const struct component_ops dsi_ops = { @@ -149,15 +134,40 @@ static const struct component_ops dsi_ops = { .unbind = dsi_unbind, }; -static int dsi_dev_probe(struct platform_device *pdev) +int dsi_dev_attach(struct platform_device *pdev) { return component_add(&pdev->dev, &dsi_ops); } +void dsi_dev_detach(struct platform_device *pdev) +{ + component_del(&pdev->dev, &dsi_ops); +} + +static int dsi_dev_probe(struct platform_device *pdev) +{ + struct msm_dsi *msm_dsi; + + DBG(""); + msm_dsi = dsi_init(pdev); + if (IS_ERR(msm_dsi)) { + /* Don't fail the bind if the dsi port is not connected */ + if (PTR_ERR(msm_dsi) == -ENODEV) + return 0; + else + return PTR_ERR(msm_dsi); + } + + return 0; +} + static int dsi_dev_remove(struct platform_device *pdev) { + struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); + DBG(""); - component_del(&pdev->dev, &dsi_ops); + dsi_destroy(msm_dsi); + return 0; } @@ -215,8 +225,10 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } - if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) + if (!msm_dsi_manager_validate_current_config(msm_dsi->id)) { + ret = -EINVAL; 
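/*
 * Editor's annotation, not part of this commit: the ret = -EINVAL above
 * matters because ret still holds 0 from the successful calls earlier in
 * msm_dsi_modeset_init(); the old code jumped to fail with ret == 0, so
 * a failed msm_dsi_manager_validate_current_config() was reported to the
 * caller as success.
 */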
goto fail; + } msm_dsi->encoder = encoder; diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index b50db91cb8a7..bb39e7ca802d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -107,6 +107,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base, u32 len); int msm_dsi_host_enable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host); +void msm_dsi_host_enable_irq(struct mipi_dsi_host *host); +void msm_dsi_host_disable_irq(struct mipi_dsi_host *host); int msm_dsi_host_power_on(struct mipi_dsi_host *host, struct msm_dsi_phy_shared_timings *phy_shared_timings, bool is_bonded_dsi, struct msm_dsi_phy *phy); @@ -116,7 +118,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host); unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host); struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host); -int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); +int msm_dsi_host_register(struct mipi_dsi_host *host); void msm_dsi_host_unregister(struct mipi_dsi_host *host); int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, struct msm_dsi_phy *src_phy); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index e269df285136..a6893cc45fe4 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -106,7 +106,8 @@ struct msm_dsi_host { phys_addr_t ctrl_size; struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; - struct clk *bus_clks[DSI_BUS_CLK_MAX]; + int num_bus_clks; + struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX]; struct clk *byte_clk; struct clk *esc_clk; @@ -115,16 +116,16 @@ struct msm_dsi_host { struct clk *pixel_clk_src; struct clk *byte_intf_clk; - u32 byte_clk_rate; - u32 pixel_clk_rate; - u32 esc_clk_rate; + unsigned long byte_clk_rate; + unsigned long pixel_clk_rate; + unsigned long esc_clk_rate; /* DSI v2 specific clocks */ struct clk *src_clk; struct clk *esc_clk_src; struct clk *dsi_clk_src; - u32 src_clk_rate; + unsigned long src_clk_rate; struct gpio_desc *disp_en_gpio; struct gpio_desc *te_gpio; @@ -374,15 +375,14 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) int i, ret = 0; /* get bus clocks */ - for (i = 0; i < cfg->num_bus_clks; i++) { - msm_host->bus_clks[i] = msm_clk_get(pdev, - cfg->bus_clk_names[i]); - if (IS_ERR(msm_host->bus_clks[i])) { - ret = PTR_ERR(msm_host->bus_clks[i]); - pr_err("%s: Unable to get %s clock, ret = %d\n", - __func__, cfg->bus_clk_names[i], ret); - goto exit; - } + for (i = 0; i < cfg->num_bus_clks; i++) + msm_host->bus_clks[i].id = cfg->bus_clk_names[i]; + msm_host->num_bus_clks = cfg->num_bus_clks; + + ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret); + goto exit; } /* get link and source clocks */ @@ -433,41 +433,6 @@ exit: return ret; } -static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host) -{ - const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; - int i, ret; - - DBG("id=%d", msm_host->id); - - for (i = 0; i < cfg->num_bus_clks; i++) { - ret = clk_prepare_enable(msm_host->bus_clks[i]); - if (ret) { - pr_err("%s: failed to enable bus clock %d ret %d\n", - __func__, i, ret); - goto err; - } - } - - return 0; -err: - for (; i > 0; i--) - clk_disable_unprepare(msm_host->bus_clks[i]); - - return ret; -} - -static void 
dsi_bus_clk_disable(struct msm_dsi_host *msm_host) -{ - const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; - int i; - - DBG(""); - - for (i = cfg->num_bus_clks - 1; i >= 0; i--) - clk_disable_unprepare(msm_host->bus_clks[i]); -} - int msm_dsi_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -478,7 +443,7 @@ int msm_dsi_runtime_suspend(struct device *dev) if (!msm_host->cfg_hnd) return 0; - dsi_bus_clk_disable(msm_host); + clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks); return 0; } @@ -493,15 +458,15 @@ int msm_dsi_runtime_resume(struct device *dev) if (!msm_host->cfg_hnd) return 0; - return dsi_bus_clk_enable(msm_host); + return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks); } int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) { - u32 byte_intf_rate; + unsigned long byte_intf_rate; int ret; - DBG("Set clk rates: pclk=%d, byteclk=%d", + DBG("Set clk rates: pclk=%d, byteclk=%lu", msm_host->mode->clock, msm_host->byte_clk_rate); ret = dev_pm_opp_set_rate(&msm_host->pdev->dev, @@ -558,13 +523,11 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) goto pixel_clk_err; } - if (msm_host->byte_intf_clk) { - ret = clk_prepare_enable(msm_host->byte_intf_clk); - if (ret) { - pr_err("%s: Failed to enable byte intf clk\n", - __func__); - goto byte_intf_clk_err; - } + ret = clk_prepare_enable(msm_host->byte_intf_clk); + if (ret) { + pr_err("%s: Failed to enable byte intf clk\n", + __func__); + goto byte_intf_clk_err; } return 0; @@ -583,7 +546,7 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host) { int ret; - DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d", + DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu", msm_host->mode->clock, msm_host->byte_clk_rate, msm_host->esc_clk_rate, msm_host->src_clk_rate); @@ -660,8 +623,7 @@ void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host) dev_pm_opp_set_rate(&msm_host->pdev->dev, 0); clk_disable_unprepare(msm_host->esc_clk); clk_disable_unprepare(msm_host->pixel_clk); - if (msm_host->byte_intf_clk) - clk_disable_unprepare(msm_host->byte_intf_clk); + clk_disable_unprepare(msm_host->byte_intf_clk); clk_disable_unprepare(msm_host->byte_clk); } @@ -673,10 +635,10 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host) clk_disable_unprepare(msm_host->byte_clk); } -static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi) +static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { struct drm_display_mode *mode = msm_host->mode; - u32 pclk_rate; + unsigned long pclk_rate; pclk_rate = mode->clock * 1000; @@ -696,7 +658,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) { u8 lanes = msm_host->lanes; u32 bpp = dsi_get_bpp(msm_host->format); - u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi); + unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi); u64 pclk_bpp = (u64)pclk_rate * bpp; if (lanes == 0) { @@ -713,7 +675,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) msm_host->pixel_clk_rate = pclk_rate; msm_host->byte_clk_rate = pclk_bpp; - DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate, + DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate, msm_host->byte_clk_rate); } @@ -772,7 +734,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi) msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div; - DBG("esc=%d, src=%d", 
msm_host->esc_clk_rate, + DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate, + msm_host->src_clk_rate); return 0; @@ -1624,6 +1586,10 @@ static int dsi_host_attach, if (ret) return ret; + ret = dsi_dev_attach(msm_host->pdev); + if (ret) + return ret; + DBG("id=%d", msm_host->id); if (msm_host->dev) queue_work(msm_host->workqueue, &msm_host->hpd_work); @@ -1636,6 +1602,8 @@ static int dsi_host_detach( { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + dsi_dev_detach(msm_host->pdev); + msm_host->device_node = NULL; DBG("id=%d", msm_host->id); @@ -1696,6 +1664,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, if (!prop) { DRM_DEV_DEBUG(dev, "failed to find data lane mapping, using default\n"); + /* Set the number of data lanes to 4 by default. */ + msm_host->num_data_lanes = 4; return 0; } @@ -1898,6 +1868,23 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) return ret; } + msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (msm_host->irq < 0) { + ret = msm_host->irq; + dev_err(&pdev->dev, "failed to get irq: %d\n", ret); + return ret; + } + + /* do not autoenable, will be enabled later */ + ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN, + "dsi_isr", msm_host); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", + msm_host->irq, ret); + return ret; + } + init_completion(&msm_host->dma_comp); init_completion(&msm_host->video_comp); mutex_init(&msm_host->dev_mutex); @@ -1925,7 +1912,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host) DBG(""); dsi_tx_buf_free(msm_host); if (msm_host->workqueue) { - flush_workqueue(msm_host->workqueue); destroy_workqueue(msm_host->workqueue); msm_host->workqueue = NULL; } @@ -1941,25 +1927,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; - struct platform_device *pdev = msm_host->pdev; int ret; - msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); - if (msm_host->irq < 0) { - ret = msm_host->irq; - DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret); - return ret; - } - - ret = devm_request_irq(&pdev->dev, msm_host->irq, - dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - "dsi_isr", msm_host); - if (ret < 0) { - DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n", - msm_host->irq, ret); - return ret; - } - msm_host->dev = dev; ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K); if (ret) { @@ -1970,7 +1939,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, return 0; } -int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) +int msm_dsi_host_register(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); int ret; @@ -1984,20 +1953,6 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) return ret; msm_host->registered = true; - - /* If the panel driver has not been probed after host register, * we should defer the host's probe. * It makes sure panel is connected when fbcon detects * connector status and gets the proper display mode to * create framebuffer. 
- * Don't try to defer if there is nothing connected to the dsi - * output - */ - if (check_defer && msm_host->device_node) { - if (IS_ERR(of_drm_find_panel(msm_host->device_node))) - if (!of_drm_find_bridge(msm_host->device_node)) - return -EPROBE_DEFER; - } } return 0; @@ -2315,6 +2270,20 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, clk_req->escclk_rate = msm_host->esc_clk_rate; } +void msm_dsi_host_enable_irq(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + enable_irq(msm_host->irq); +} + +void msm_dsi_host_disable_irq(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + disable_irq(msm_host->irq); +} + int msm_dsi_host_enable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index c41d39f5b7cf..01bf8d907933 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -3,6 +3,8 @@ * Copyright (c) 2015, The Linux Foundation. All rights reserved. */ +#include "drm/drm_bridge_connector.h" + #include "msm_kms.h" #include "dsi.h" @@ -72,7 +74,7 @@ static int dsi_mgr_setup_components(int id) int ret; if (!IS_BONDED_DSI()) { - ret = msm_dsi_host_register(msm_dsi->host, true); + ret = msm_dsi_host_register(msm_dsi->host); if (ret) return ret; @@ -92,10 +94,10 @@ static int dsi_mgr_setup_components(int id) * because only master DSI device adds the panel to global * panel list. The panel's device is the master DSI device. */ - ret = msm_dsi_host_register(slave_link_dsi->host, false); + ret = msm_dsi_host_register(slave_link_dsi->host); if (ret) return ret; - ret = msm_dsi_host_register(master_link_dsi->host, true); + ret = msm_dsi_host_register(master_link_dsi->host); if (ret) return ret; @@ -377,6 +379,14 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) } } + /* + * Enable before preparing the panel, disable after unpreparing, so + * that the panel can communicate over the DSI link. + */ + msm_dsi_host_enable_irq(host); + if (is_bonded_dsi && msm_dsi1) + msm_dsi_host_enable_irq(msm_dsi1->host); + /* Always call panel functions once, because even for dual panels, * there is only one drm_panel instance. 
*/ @@ -411,6 +421,10 @@ host_en_fail: if (panel) drm_panel_unprepare(panel); panel_prep_fail: + msm_dsi_host_disable_irq(host); + if (is_bonded_dsi && msm_dsi1) + msm_dsi_host_disable_irq(msm_dsi1->host); + if (is_bonded_dsi && msm_dsi1) msm_dsi_host_power_off(msm_dsi1->host); host1_on_fail: @@ -523,6 +537,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) id, ret); } + msm_dsi_host_disable_irq(host); + if (is_bonded_dsi && msm_dsi1) + msm_dsi_host_disable_irq(msm_dsi1->host); + /* Save PHY status if it is a clock source */ msm_dsi_phy_pll_save_state(msm_dsi->phy); @@ -688,10 +706,10 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_device *dev = msm_dsi->dev; + struct drm_connector *connector; struct drm_encoder *encoder; struct drm_bridge *int_bridge, *ext_bridge; - struct drm_connector *connector; - struct list_head *connector_list; + int ret; int_bridge = msm_dsi->bridge; ext_bridge = msm_dsi->external_bridge = @@ -699,22 +717,44 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) encoder = msm_dsi->encoder; - /* link the internal dsi bridge to the external bridge */ - drm_bridge_attach(encoder, ext_bridge, int_bridge, 0); - /* - * we need the drm_connector created by the external bridge - * driver (or someone else) to feed it to our driver's - * priv->connector[] list, mainly for msm_fbdev_init() + * Try first to create the bridge without it creating its own + * connector.. currently some bridges support this, and others + * do not (and some support both modes) */ - connector_list = &dev->mode_config.connector_list; + ret = drm_bridge_attach(encoder, ext_bridge, int_bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret == -EINVAL) { + struct drm_connector *connector; + struct list_head *connector_list; + + /* link the internal dsi bridge to the external bridge */ + drm_bridge_attach(encoder, ext_bridge, int_bridge, 0); + + /* + * we need the drm_connector created by the external bridge + * driver (or someone else) to feed it to our driver's + * priv->connector[] list, mainly for msm_fbdev_init() + */ + connector_list = &dev->mode_config.connector_list; + + list_for_each_entry(connector, connector_list, head) { + if (drm_connector_has_possible_encoder(connector, encoder)) + return connector; + } + + return ERR_PTR(-ENODEV); + } - list_for_each_entry(connector, connector_list, head) { - if (drm_connector_has_possible_encoder(connector, encoder)) - return connector; + connector = drm_bridge_connector_init(dev, encoder); + if (IS_ERR(connector)) { + DRM_ERROR("Unable to create bridge connector\n"); + return ERR_CAST(connector); } - return ERR_PTR(-ENODEV); + drm_connector_attach_encoder(connector, encoder); + + return connector; } void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 8c65ef6968ca..9842e04b5858 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -627,6 +627,8 @@ static const struct of_device_id dsi_phy_dt_match[] = { .data = &dsi_phy_14nm_cfgs }, { .compatible = "qcom,dsi-phy-14nm-660", .data = &dsi_phy_14nm_660_cfgs }, + { .compatible = "qcom,dsi-phy-14nm-8953", + .data = &dsi_phy_14nm_8953_cfgs }, #endif #ifdef CONFIG_DRM_MSM_DSI_10NM_PHY { .compatible = "qcom,dsi-phy-10nm", diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index b91303ada74f..4c8257581bfc 100644 --- 
a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c index d13552b2213b..7414966f198e 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -110,14 +110,13 @@ static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, u32 nb_tries, u32 timeout_us) { - bool pll_locked = false; + bool pll_locked = false, pll_ready = false; void __iomem *base = pll_14nm->phy->pll_base; u32 tries, val; tries = nb_tries; while (tries--) { - val = dsi_phy_read(base + - REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); pll_locked = !!(val & BIT(5)); if (pll_locked) @@ -126,23 +125,24 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, udelay(timeout_us); } - if (!pll_locked) { - tries = nb_tries; - while (tries--) { - val = dsi_phy_read(base + - REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); - pll_locked = !!(val & BIT(0)); + if (!pll_locked) + goto out; - if (pll_locked) - break; + tries = nb_tries; + while (tries--) { + val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_ready = !!(val & BIT(0)); + + if (pll_ready) + break; - udelay(timeout_us); - } + udelay(timeout_us); } - DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); +out: + DBG("DSI PLL is %slocked, %sready", pll_locked ? "" : "*not* ", pll_ready ? 
"" : "*not* "); - return pll_locked; + return pll_locked && pll_ready; } static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf) @@ -213,9 +213,7 @@ static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_conf DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref); dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref); - div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); - - dec_start = div_u64(dec_start_multiple, multiplier); + dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); pconf->dec_start = (u32)dec_start; pconf->div_frac_start = div_frac_start; @@ -1065,3 +1063,24 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = { .io_start = { 0xc994400, 0xc996000 }, .num_dsi_phy = 2, }; + +const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = { + .has_phy_lane = true, + .reg_cfg = { + .num = 1, + .regs = { + {"vcca", 17000, 32}, + }, + }, + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .pll_init = dsi_pll_14nm_init, + .save_pll_state = dsi_14nm_pll_save_state, + .restore_pll_state = dsi_14nm_pll_restore_state, + }, + .min_pll_rate = VCO_MIN_RATE, + .max_pll_rate = VCO_MAX_RATE, + .io_start = { 0x1a94400, 0x1a96400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index aaa37456f4ee..71ed4aa0dc67 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -428,7 +428,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9; snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id); - snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id); + snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id + 1); bytediv_init.name = clk_name; bytediv_init.ops = &clk_bytediv_ops; @@ -442,7 +442,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov return ret; provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw; - snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id); + snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id + 1); /* DIV3 */ hw = devm_clk_hw_register_divider(dev, clk_name, parent_name, 0, pll_28nm->phy->pll_base + diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index cb297b08458e..079613d2aaa9 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -114,9 +114,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config multiplier = 1 << FRAC_BITS; dec_multiple = div_u64(pll_freq * multiplier, divider); - div_u64_rem(dec_multiple, multiplier, &frac); - - dec = div_u64(dec_multiple, multiplier); + dec = div_u64_rem(dec_multiple, multiplier, &frac); if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1)) config->pll_clock_inverters = 0x28; diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 4fb397ee7c84..a68a4a1867c1 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -1116,7 +1116,7 @@ void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on) int msm_edp_ctrl_init(struct msm_edp *edp) { struct edp_ctrl *ctrl = NULL; - struct device *dev = &edp->pdev->dev; + struct device *dev; int ret; if (!edp) { @@ -1124,6 +1124,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp) return -EINVAL; } + dev = 
&edp->pdev->dev; ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return -ENOMEM; @@ -1189,7 +1190,6 @@ void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl) return; if (ctrl->workqueue) { - flush_workqueue(ctrl->workqueue); destroy_workqueue(ctrl->workqueue); ctrl->workqueue = NULL; } @@ -1242,8 +1242,6 @@ bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl) int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, struct drm_connector *connector, struct edid **edid) { - int ret = 0; - mutex_lock(&ctrl->dev_mutex); if (ctrl->edid) { @@ -1278,7 +1276,7 @@ disable_ret: } unlock_ret: mutex_unlock(&ctrl->dev_mutex); - return ret; + return 0; } int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 737453b6e596..75b64e6ae035 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -61,10 +61,8 @@ static void msm_hdmi_destroy(struct hdmi *hdmi) * at this point, hpd has been disabled, * after flush workq, it's safe to deinit hdcp */ - if (hdmi->workq) { - flush_workqueue(hdmi->workq); + if (hdmi->workq) destroy_workqueue(hdmi->workq); - } msm_hdmi_hdcp_destroy(hdmi); if (hdmi->phy_dev) { @@ -154,19 +152,13 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) ret = -ENOMEM; goto fail; } - for (i = 0; i < config->hpd_reg_cnt; i++) { - struct regulator *reg; - - reg = devm_regulator_get(&pdev->dev, - config->hpd_reg_names[i]); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - goto fail; - } + for (i = 0; i < config->hpd_reg_cnt; i++) + hdmi->hpd_regs[i].supply = config->hpd_reg_names[i]; - hdmi->hpd_regs[i] = reg; + ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %d\n", ret); + goto fail; } hdmi->pwr_regs = devm_kcalloc(&pdev->dev, @@ -177,19 +169,11 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev) ret = -ENOMEM; goto fail; } - for (i = 0; i < config->pwr_reg_cnt; i++) { - struct regulator *reg; - - reg = devm_regulator_get(&pdev->dev, - config->pwr_reg_names[i]); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n", - config->pwr_reg_names[i], ret); - goto fail; - } - hdmi->pwr_regs[i] = reg; + ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs); + if (ret) { + DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %d\n", ret); + goto fail; } hdmi->hpd_clks = devm_kcalloc(&pdev->dev, diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index d0b84f0abee1..82261078c6b1 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -56,8 +56,8 @@ struct hdmi { void __iomem *qfprom_mmio; phys_addr_t mmio_phy_addr; - struct regulator **hpd_regs; - struct regulator **pwr_regs; + struct regulator_bulk_data *hpd_regs; + struct regulator_bulk_data *pwr_regs; struct clk **hpd_clks; struct clk **pwr_clks; @@ -163,7 +163,7 @@ struct hdmi_phy { void __iomem *mmio; struct hdmi_phy_cfg *cfg; const struct hdmi_phy_funcs *funcs; - struct regulator **regs; + struct regulator_bulk_data *regs; struct clk **clks; }; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 6e380db9287b..f04eb4a70f0d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c 
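[Aside: the hdmi.c/hdmi.h hunks above, and the hdmi_bridge.c hunk that follows, convert open-coded per-supply regulator loops to the regulator_bulk_* consumer API. A minimal sketch of that pattern, with hypothetical supply names and a hypothetical probe helper (nothing here is taken from the driver beyond the API calls), looks like:

/*
 * Consumer-side regulator_bulk pattern, as used by the conversion above.
 * The supply names and my_probe() are illustrative only.
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

struct my_ctx {
	struct regulator_bulk_data regs[2];	/* replaces struct regulator ** */
};

static int my_probe(struct device *dev, struct my_ctx *ctx)
{
	int ret;

	/* Fill in only the supply names; the bulk call fetches all of them. */
	ctx->regs[0].supply = "hpd-gdsc";
	ctx->regs[1].supply = "hpd-5v";

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->regs), ctx->regs);
	if (ret)
		return ret;	/* one error path instead of one per supply */

	/* Enable/disable likewise collapse to a single call each. */
	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regs), ctx->regs);
	if (ret)
		return ret;

	regulator_bulk_disable(ARRAY_SIZE(ctx->regs), ctx->regs);
	return 0;
}

Besides shrinking the error handling, regulator_bulk_enable() also rolls back already-enabled supplies on failure, which the removed hand-rolled loops did not do.]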
@@ -28,13 +28,9 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge) pm_runtime_get_sync(&hdmi->pdev->dev); - for (i = 0; i < config->pwr_reg_cnt; i++) { - ret = regulator_enable(hdmi->pwr_regs[i]); - if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n", - config->pwr_reg_names[i], ret); - } - } + ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret); if (config->pwr_clk_cnt > 0) { DBG("pixclock: %lu", hdmi->pixclock); @@ -70,13 +66,9 @@ static void power_off(struct drm_bridge *bridge) for (i = 0; i < config->pwr_clk_cnt; i++) clk_disable_unprepare(hdmi->pwr_clks[i]); - for (i = 0; i < config->pwr_reg_cnt; i++) { - ret = regulator_disable(hdmi->pwr_regs[i]); - if (ret) { - DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n", - config->pwr_reg_names[i], ret); - } - } + ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs); + if (ret) + DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret); pm_runtime_put_autosuspend(&hdmi->pdev->dev); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index 58707a1f3878..a7f729cdec7b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -146,16 +146,13 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector) const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; uint32_t hpd_ctrl; - int i, ret; + int ret; unsigned long flags; - for (i = 0; i < config->hpd_reg_cnt; i++) { - ret = regulator_enable(hdmi->hpd_regs[i]); - if (ret) { - DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - goto fail; - } + ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs); + if (ret) { + DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret); + goto fail; } ret = pinctrl_pm_select_default_state(dev); @@ -207,7 +204,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) struct hdmi *hdmi = hdmi_connector->hdmi; const struct hdmi_platform_config *config = hdmi->config; struct device *dev = &hdmi->pdev->dev; - int i, ret = 0; + int ret; /* Disable HPD interrupt */ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0); @@ -225,12 +222,9 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector) if (ret) dev_warn(dev, "pinctrl state chg failed: %d\n", ret); - for (i = 0; i < config->hpd_reg_cnt; i++) { - ret = regulator_disable(hdmi->hpd_regs[i]); - if (ret) - dev_warn(dev, "failed to disable hpd regulator: %s (%d)\n", - config->hpd_reg_names[i], ret); - } + ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs); + if (ret) + dev_warn(dev, "failed to disable hpd regulator: %d\n", ret); } static void diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c index 8a38d4b95102..16b0e8836d27 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c @@ -23,22 +23,15 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy) if (!phy->clks) return -ENOMEM; - for (i = 0; i < cfg->num_regs; i++) { - struct regulator *reg; - - reg = devm_regulator_get(dev, cfg->reg_names[i]); - if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - if (ret != -EPROBE_DEFER) { - DRM_DEV_ERROR(dev, - "failed to get phy regulator: %s (%d)\n", - cfg->reg_names[i], ret); - } + for (i = 0; i < cfg->num_regs; i++) + phy->regs[i].supply = cfg->reg_names[i]; - 
return ret; - } + ret = devm_regulator_bulk_get(dev, cfg->num_regs, phy->regs); + if (ret) { + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(dev, "failed to get phy regulators: %d\n", ret); - phy->regs[i] = reg; + return ret; } for (i = 0; i < cfg->num_clks; i++) { @@ -66,11 +59,10 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy) pm_runtime_get_sync(dev); - for (i = 0; i < cfg->num_regs; i++) { - ret = regulator_enable(phy->regs[i]); - if (ret) - DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n", - cfg->reg_names[i], ret); + ret = regulator_bulk_enable(cfg->num_regs, phy->regs); + if (ret) { + DRM_DEV_ERROR(dev, "failed to enable regulators: (%d)\n", ret); + return ret; } for (i = 0; i < cfg->num_clks; i++) { @@ -92,8 +84,7 @@ void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy) for (i = cfg->num_clks - 1; i >= 0; i--) clk_disable_unprepare(phy->clks[i]); - for (i = cfg->num_regs - 1; i >= 0; i--) - regulator_disable(phy->regs[i]); + regulator_bulk_disable(cfg->num_regs, phy->regs); pm_runtime_put_sync(dev); } diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c index a8f3b2cbfdc5..99c7853353fd 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -682,7 +682,7 @@ static int hdmi_8996_pll_is_enabled(struct clk_hw *hw) return pll_locked; } -static struct clk_ops hdmi_8996_pll_ops = { +static const struct clk_ops hdmi_8996_pll_ops = { .set_rate = hdmi_8996_pll_set_clk_rate, .round_rate = hdmi_8996_pll_round_rate, .recalc_rate = hdmi_8996_pll_recalc_rate, @@ -695,7 +695,7 @@ static const char * const hdmi_pll_parents[] = { "xo", }; -static struct clk_init_data pll_init = { +static const struct clk_init_data pll_init = { .name = "hdmipll", .ops = &hdmi_8996_pll_ops, .parent_names = hdmi_pll_parents, diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index fab09e7c6efc..27c9ae563f2f 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -116,20 +116,10 @@ out: trace_msm_atomic_async_commit_finish(crtc_mask); } -static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t) -{ - struct msm_pending_timer *timer = container_of(t, - struct msm_pending_timer, timer); - - kthread_queue_work(timer->worker, &timer->work); - - return HRTIMER_NORESTART; -} - static void msm_atomic_pending_work(struct kthread_work *work) { struct msm_pending_timer *timer = container_of(work, - struct msm_pending_timer, work); + struct msm_pending_timer, work.work); msm_atomic_async_commit(timer->kms, timer->crtc_idx); } @@ -139,8 +129,6 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer, { timer->kms = kms; timer->crtc_idx = crtc_idx; - hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - timer->timer.function = msm_atomic_pending_timer; timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx); if (IS_ERR(timer->worker)) { @@ -149,7 +137,10 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer, return ret; } sched_set_fifo(timer->worker->task); - kthread_init_work(&timer->work, msm_atomic_pending_work); + + msm_hrtimer_work_init(&timer->work, timer->worker, + msm_atomic_pending_work, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); return 0; } @@ -258,7 +249,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) vsync_time = kms->funcs->vsync_time(kms, async_crtc); wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1)); - hrtimer_start(&timer->timer, wakeup_time, + 
msm_hrtimer_queue_work(&timer->work, wakeup_time, HRTIMER_MODE_ABS); } diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 09d2d279c30a..dee13fedee3b 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) goto free_priv; pm_runtime_get_sync(&gpu->pdev->dev); + msm_gpu_hw_init(gpu); show_priv->state = gpu->funcs->gpu_state_get(gpu); pm_runtime_put_sync(&gpu->pdev->dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 2e6fc185e54d..892c04365239 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -58,7 +58,7 @@ static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = { }; #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING -static bool reglog = false; +static bool reglog; MODULE_PARM_DESC(reglog, "Enable register read/write logging"); module_param(reglog, bool, 0600); #else @@ -75,7 +75,7 @@ static char *vram = "16m"; MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); module_param(vram, charp, 0); -bool dumpstate = false; +bool dumpstate; MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors"); module_param(dumpstate, bool, 0600); @@ -200,6 +200,35 @@ void msm_rmw(void __iomem *addr, u32 mask, u32 or) msm_writel(val | or, addr); } +static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t) +{ + struct msm_hrtimer_work *work = container_of(t, + struct msm_hrtimer_work, timer); + + kthread_queue_work(work->worker, &work->work); + + return HRTIMER_NORESTART; +} + +void msm_hrtimer_queue_work(struct msm_hrtimer_work *work, + ktime_t wakeup_time, + enum hrtimer_mode mode) +{ + hrtimer_start(&work->timer, wakeup_time, mode); +} + +void msm_hrtimer_work_init(struct msm_hrtimer_work *work, + struct kthread_worker *worker, + kthread_work_func_t fn, + clockid_t clock_id, + enum hrtimer_mode mode) +{ + hrtimer_init(&work->timer, clock_id, mode); + work->timer.function = msm_hrtimer_worktimer; + work->worker = worker; + kthread_init_work(&work->work, fn); +} + static irqreturn_t msm_irq(int irq, void *arg) { struct drm_device *dev = arg; @@ -630,10 +659,11 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) if (ret) goto err_msm_uninit; - ret = msm_disp_snapshot_init(ddev); - if (ret) - DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret); - + if (kms) { + ret = msm_disp_snapshot_init(ddev); + if (ret) + DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret); + } drm_mode_config_reset(ddev); #ifdef CONFIG_DRM_FBDEV_EMULATION @@ -682,6 +712,7 @@ static void load_gpu(struct drm_device *dev) static int context_init(struct drm_device *dev, struct drm_file *file) { + static atomic_t ident = ATOMIC_INIT(0); struct msm_drm_private *priv = dev->dev_private; struct msm_file_private *ctx; @@ -689,12 +720,17 @@ static int context_init(struct drm_device *dev, struct drm_file *file) if (!ctx) return -ENOMEM; + INIT_LIST_HEAD(&ctx->submitqueues); + rwlock_init(&ctx->queuelock); + kref_init(&ctx->ref); msm_submitqueue_init(dev, ctx); ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current); file->driver_priv = ctx; + ctx->seqno = atomic_inc_return(&ident); + return 0; } @@ -931,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, return ret; } -static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, - struct drm_file *file) +static int wait_fence(struct 
msm_gpu_submitqueue *queue, uint32_t fence_id, + ktime_t timeout) { - struct msm_drm_private *priv = dev->dev_private; - struct drm_msm_wait_fence *args = data; - ktime_t timeout = to_ktime(args->timeout); - struct msm_gpu_submitqueue *queue; - struct msm_gpu *gpu = priv->gpu; struct dma_fence *fence; int ret; - if (args->pad) { - DRM_ERROR("invalid pad: %08x\n", args->pad); + if (fence_id > queue->last_fence) { + DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n", + fence_id, queue->last_fence); return -EINVAL; } - if (!gpu) - return 0; - - queue = msm_submitqueue_get(file->driver_priv, args->queueid); - if (!queue) - return -ENOENT; - /* * Map submitqueue scoped "seqno" (which is actually an idr key) * back to underlying dma-fence @@ -965,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, ret = mutex_lock_interruptible(&queue->lock); if (ret) return ret; - fence = idr_find(&queue->fence_idr, args->fence); + fence = idr_find(&queue->fence_idr, fence_id); if (fence) fence = dma_fence_get_rcu(fence); mutex_unlock(&queue->lock); @@ -981,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, } dma_fence_put(fence); + + return ret; +} + +static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_wait_fence *args = data; + struct msm_gpu_submitqueue *queue; + int ret; + + if (args->pad) { + DRM_ERROR("invalid pad: %08x\n", args->pad); + return -EINVAL; + } + + if (!priv->gpu) + return 0; + + queue = msm_submitqueue_get(file->driver_priv, args->queueid); + if (!queue) + return -ENOENT; + + ret = wait_fence(queue, args->fence, to_ktime(args->timeout)); + msm_submitqueue_put(queue); return ret; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 8b005d1ac899..eb984d925f4d 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -53,14 +53,6 @@ struct msm_disp_state; #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) -struct msm_file_private { - rwlock_t queuelock; - struct list_head submitqueues; - int queueid; - struct msm_gem_address_space *aspace; - struct kref ref; -}; - enum msm_mdp_plane_property { PLANE_PROP_ZPOS, PLANE_PROP_ALPHA, @@ -68,6 +60,13 @@ enum msm_mdp_plane_property { PLANE_PROP_MAX_NUM }; +enum msm_dp_controller { + MSM_DP_CONTROLLER_0, + MSM_DP_CONTROLLER_1, + MSM_DP_CONTROLLER_2, + MSM_DP_CONTROLLER_COUNT, +}; + #define MSM_GPU_MAX_RINGS 4 #define MAX_H_TILES_PER_DISPLAY 2 @@ -161,7 +160,7 @@ struct msm_drm_private { /* DSI is shared by mdp4 and mdp5 */ struct msm_dsi *dsi[2]; - struct msm_dp *dp; + struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT]; /* when we have more than one 'msm_gpu' these need to be an array: */ struct msm_gpu *gpu; @@ -344,6 +343,8 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, struct msm_dsi; #ifdef CONFIG_DRM_MSM_DSI +int dsi_dev_attach(struct platform_device *pdev); +void dsi_dev_detach(struct platform_device *pdev); void __init msm_dsi_register(void); void __exit msm_dsi_unregister(void); int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, @@ -488,40 +489,27 @@ void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr); void msm_rmw(void __iomem *addr, u32 mask, u32 or); -struct msm_gpu_submitqueue; -int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); -struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private 
*ctx, - u32 id); -int msm_submitqueue_create(struct drm_device *drm, - struct msm_file_private *ctx, - u32 prio, u32 flags, u32 *id); -int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx, - struct drm_msm_submitqueue_query *args); -int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); -void msm_submitqueue_close(struct msm_file_private *ctx); - -void msm_submitqueue_destroy(struct kref *kref); - -static inline void __msm_file_private_destroy(struct kref *kref) -{ - struct msm_file_private *ctx = container_of(kref, - struct msm_file_private, ref); - - msm_gem_address_space_put(ctx->aspace); - kfree(ctx); -} - -static inline void msm_file_private_put(struct msm_file_private *ctx) -{ - kref_put(&ctx->ref, __msm_file_private_destroy); -} +/** + * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work + * + * @timer: hrtimer to control when the kthread work is triggered + * @work: the kthread work + * @worker: the kthread worker the work will be scheduled on + */ +struct msm_hrtimer_work { + struct hrtimer timer; + struct kthread_work work; + struct kthread_worker *worker; +}; -static inline struct msm_file_private *msm_file_private_get( - struct msm_file_private *ctx) -{ - kref_get(&ctx->ref); - return ctx; -} +void msm_hrtimer_queue_work(struct msm_hrtimer_work *work, + ktime_t wakeup_time, + enum hrtimer_mode mode); +void msm_hrtimer_work_init(struct msm_hrtimer_work *work, + struct kthread_worker *worker, + kthread_work_func_t fn, + clockid_t clock_id, + enum hrtimer_mode mode); #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) @@ -547,7 +535,7 @@ static inline int align_pitch(int width, int bpp) static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) { ktime_t now = ktime_get(); - unsigned long remaining_jiffies; + s64 remaining_jiffies; if (ktime_compare(*timeout, now) < 0) { remaining_jiffies = 0; @@ -556,7 +544,7 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); } - return remaining_jiffies; + return clamp(remaining_jiffies, 0LL, (s64)INT_MAX); } #endif /* __MSM_DRV_H__ */ diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 40a9863f5951..02b9ae65a96a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -5,6 +5,7 @@ */ #include <linux/dma-map-ops.h> +#include <linux/vmalloc.h> #include <linux/spinlock.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> @@ -865,23 +866,11 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj) } #ifdef CONFIG_DEBUG_FS -static void describe_fence(struct dma_fence *fence, const char *type, - struct seq_file *m) -{ - if (!dma_fence_is_signaled(fence)) - seq_printf(m, "\t%9s: %s %s seq %llu\n", type, - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - fence->seqno); -} - void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, struct msm_gem_stats *stats) { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct dma_resv *robj = obj->resv; - struct dma_resv_list *fobj; - struct dma_fence *fence; struct msm_gem_vma *vma; uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; @@ -955,22 +944,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, seq_puts(m, "\n"); } - rcu_read_lock(); - fobj = dma_resv_shared_list(robj); - if (fobj) { - unsigned int i, shared_count = fobj->shared_count; - - for 
(i = 0; i < shared_count; i++) { - fence = rcu_dereference(fobj->shared[i]); - describe_fence(fence, "Shared", m); - } - } - - fence = dma_resv_excl_fence(robj); - if (fence) - describe_fence(fence, "Exclusive", m); - rcu_read_unlock(); - + dma_resv_describe(robj, m); msm_gem_unlock(obj); } @@ -1055,8 +1029,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct { struct msm_gem_object *msm_obj = to_msm_bo(obj); - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); return 0; @@ -1120,7 +1093,7 @@ static int msm_gem_new_impl(struct drm_device *dev, break; fallthrough; default: - DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", + DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n", (flags & MSM_BO_CACHE_MASK)); return -EINVAL; } @@ -1132,6 +1105,7 @@ static int msm_gem_new_impl(struct drm_device *dev, msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; + INIT_LIST_HEAD(&msm_obj->node); INIT_LIST_HEAD(&msm_obj->vmas); *obj = &msm_obj->base; @@ -1166,7 +1140,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32 ret = msm_gem_new_impl(dev, size, flags, &obj); if (ret) - goto fail; + return ERR_PTR(ret); msm_obj = to_msm_bo(obj); @@ -1250,7 +1224,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); if (ret) - goto fail; + return ERR_PTR(ret); drm_gem_private_object_init(dev, obj, size); diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 0f1b29ee04a9..086dacf2f26a 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -4,6 +4,9 @@ * Author: Rob Clark <robdclark@gmail.com> */ +#include <linux/vmalloc.h> +#include <linux/sched/mm.h> + #include "msm_drv.h" #include "msm_gem.h" #include "msm_gpu.h" diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 924b01b9c105..282628d6b72c 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -46,7 +46,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, if (!submit) return ERR_PTR(-ENOMEM); - ret = drm_sched_job_init(&submit->base, &queue->entity, queue); + ret = drm_sched_job_init(&submit->base, queue->entity, queue); if (ret) { kfree(submit); return ERR_PTR(ret); @@ -161,7 +161,8 @@ out: static int submit_lookup_cmds(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { - unsigned i, sz; + unsigned i; + size_t sz; int ret = 0; for (i = 0; i < args->nr_cmds; i++) { @@ -771,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->nr_cmds); if (IS_ERR(submit)) { ret = PTR_ERR(submit); + submit = NULL; goto out_unlock; } @@ -903,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, drm_sched_entity_push_job(&submit->base); args->fence = submit->fence_id; + queue->last_fence = submit->fence_id; msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); msm_process_post_deps(post_deps, args->nr_out_syncobjs, diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 8a3a592da3a4..2c46cd968ac4 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -296,7 +296,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, state->bos = 
kcalloc(nr, sizeof(struct msm_gpu_state_bo), GFP_KERNEL); - for (i = 0; i < submit->nr_bos; i++) { + for (i = 0; state->bos && i < submit->nr_bos; i++) { if (should_dump(submit, i)) { msm_gpu_crashstate_get_bo(state, submit->bos[i].obj, submit->bos[i].iova, submit->bos[i].flags); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 0e4b45bff2e6..48ea2de911f1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -112,6 +112,13 @@ struct msm_gpu_devfreq { * it is inactive. */ unsigned long idle_freq; + + /** + * idle_work: + * + * Used to delay clamping to idle freq on active->idle transition. + */ + struct msm_hrtimer_work idle_work; }; struct msm_gpu { @@ -203,6 +210,10 @@ struct msm_gpu { uint32_t suspend_count; struct msm_gpu_state *crashstate; + + /* Enable clamping to idle freq when inactive: */ + bool clamp_to_idle; + /* True if the hardware supports expanded apriv (a650 and newer) */ bool hw_apriv; @@ -258,6 +269,39 @@ struct msm_gpu_perfcntr { #define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN) /** + * struct msm_file_private - per-drm_file context + * + * @queuelock: synchronizes access to submitqueues list + * @submitqueues: list of &msm_gpu_submitqueue created by userspace + * @queueid: counter incremented each time a submitqueue is created, + * used to assign &msm_gpu_submitqueue.id + * @aspace: the per-process GPU address-space + * @ref: reference count + * @seqno: unique per process seqno + */ +struct msm_file_private { + rwlock_t queuelock; + struct list_head submitqueues; + int queueid; + struct msm_gem_address_space *aspace; + struct kref ref; + int seqno; + + /** + * entities: + * + * Table of per-priority-level sched entities used by submitqueues + * associated with this &drm_file. Because some userspace apps + * make assumptions about rendering from multiple gl contexts + * (of the same priority) within the process happening in FIFO + * order without requiring any fencing beyond MakeCurrent(), we + * create at most one &drm_sched_entity per-process per-priority- + * level. + */ + struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS]; +}; + +/** * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority * * @gpu: the gpu instance @@ -304,6 +348,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, } /** + * struct msm_gpu_submitqueues - Userspace created context. + * * A submitqueue is associated with a gl context or vk queue (or equiv) * in userspace. * @@ -313,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, * @ring_nr: the ringbuffer used by this submitqueue, which is determined * by the submitqueue's priority * @faults: the number of GPU hangs associated with this submitqueue + * @last_fence: the sequence number of the last allocated fence (for error + * checking) * @ctx: the per-drm_file context associated with the submitqueue (ie. 
* which set of pgtables do submits jobs associated with the * submitqueue use) @@ -321,19 +369,20 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, * seqno, protected by submitqueue lock * @lock: submitqueue lock * @ref: reference count - * @entity: the submit job-queue + * @entity: the submit job-queue */ struct msm_gpu_submitqueue { int id; u32 flags; u32 ring_nr; int faults; + uint32_t last_fence; struct msm_file_private *ctx; struct list_head node; struct idr fence_idr; struct mutex lock; struct kref ref; - struct drm_sched_entity entity; + struct drm_sched_entity *entity; }; struct msm_gpu_state_bo { @@ -421,6 +470,33 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val) int msm_gpu_pm_suspend(struct msm_gpu *gpu); int msm_gpu_pm_resume(struct msm_gpu *gpu); +int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); +struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, + u32 id); +int msm_submitqueue_create(struct drm_device *drm, + struct msm_file_private *ctx, + u32 prio, u32 flags, u32 *id); +int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx, + struct drm_msm_submitqueue_query *args); +int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); +void msm_submitqueue_close(struct msm_file_private *ctx); + +void msm_submitqueue_destroy(struct kref *kref); + +void __msm_file_private_destroy(struct kref *kref); + +static inline void msm_file_private_put(struct msm_file_private *ctx) +{ + kref_put(&ctx->ref, __msm_file_private_destroy); +} + +static inline struct msm_file_private *msm_file_private_get( + struct msm_file_private *ctx) +{ + kref_get(&ctx->ref); + return ctx; +} + void msm_devfreq_init(struct msm_gpu *gpu); void msm_devfreq_cleanup(struct msm_gpu *gpu); void msm_devfreq_resume(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 0a1ee20296a2..384e90c4b2a7 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, struct msm_gpu *gpu = dev_to_gpu(dev); struct dev_pm_opp *opp; + /* + * Note that devfreq_recommended_opp() can modify the freq + * to something that actually is in the opp table: + */ opp = devfreq_recommended_opp(dev, freq, flags); /* @@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, */ if (gpu->devfreq.idle_freq) { gpu->devfreq.idle_freq = *freq; + dev_pm_opp_put(opp); return 0; } @@ -88,8 +93,12 @@ static struct devfreq_dev_profile msm_devfreq_profile = { .get_cur_freq = msm_devfreq_get_cur_freq, }; +static void msm_devfreq_idle_work(struct kthread_work *work); + void msm_devfreq_init(struct msm_gpu *gpu) { + struct msm_gpu_devfreq *df = &gpu->devfreq; + /* We need target support to do devfreq */ if (!gpu->funcs->gpu_busy) return; @@ -105,25 +114,27 @@ void msm_devfreq_init(struct msm_gpu *gpu) msm_devfreq_profile.freq_table = NULL; msm_devfreq_profile.max_state = 0; - gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev, + df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev, &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL); - if (IS_ERR(gpu->devfreq.devfreq)) { + if (IS_ERR(df->devfreq)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); - gpu->devfreq.devfreq = NULL; + df->devfreq = NULL; return; } - devfreq_suspend_device(gpu->devfreq.devfreq); + 
devfreq_suspend_device(df->devfreq); - gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, - gpu->devfreq.devfreq); + gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq); if (IS_ERR(gpu->cooling)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't register GPU cooling device\n"); gpu->cooling = NULL; } + + msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); } void msm_devfreq_cleanup(struct msm_gpu *gpu) @@ -151,6 +162,14 @@ void msm_devfreq_active(struct msm_gpu *gpu) unsigned int idle_time; unsigned long target_freq = df->idle_freq; + if (!df->devfreq) + return; + + /* + * Cancel any pending transition to idle frequency: + */ + hrtimer_cancel(&df->idle_work.timer); + /* * Hold devfreq lock to synchronize with get_dev_status()/ * target() callbacks @@ -181,9 +200,12 @@ void msm_devfreq_active(struct msm_gpu *gpu) mutex_unlock(&df->devfreq->lock); } -void msm_devfreq_idle(struct msm_gpu *gpu) + +static void msm_devfreq_idle_work(struct kthread_work *work) { - struct msm_gpu_devfreq *df = &gpu->devfreq; + struct msm_gpu_devfreq *df = container_of(work, + struct msm_gpu_devfreq, idle_work.work); + struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); unsigned long idle_freq, target_freq = 0; /* @@ -194,10 +216,22 @@ void msm_devfreq_idle(struct msm_gpu *gpu) idle_freq = get_freq(gpu); - msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); + if (gpu->clamp_to_idle) + msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); df->idle_time = ktime_get(); df->idle_freq = idle_freq; mutex_unlock(&df->devfreq->lock); } + +void msm_devfreq_idle(struct msm_gpu *gpu) +{ + struct msm_gpu_devfreq *df = &gpu->devfreq; + + if (!df->devfreq) + return; + + msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1), + HRTIMER_MODE_REL); +} diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index de2bc3467bb5..6a42b819abc4 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -136,8 +136,7 @@ struct msm_kms; * shortly before vblank to flush pending async updates. 
*/ struct msm_pending_timer { - struct hrtimer timer; - struct kthread_work work; + struct msm_hrtimer_work work; struct kthread_worker *worker; struct msm_kms *kms; unsigned crtc_idx; diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 32a55d81b58b..7cb158bcbcf6 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -7,6 +7,24 @@ #include "msm_gpu.h" +void __msm_file_private_destroy(struct kref *kref) +{ + struct msm_file_private *ctx = container_of(kref, + struct msm_file_private, ref); + int i; + + for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) { + if (!ctx->entities[i]) + continue; + + drm_sched_entity_destroy(ctx->entities[i]); + kfree(ctx->entities[i]); + } + + msm_gem_address_space_put(ctx->aspace); + kfree(ctx); +} + void msm_submitqueue_destroy(struct kref *kref) { struct msm_gpu_submitqueue *queue = container_of(kref, @@ -14,8 +32,6 @@ void msm_submitqueue_destroy(struct kref *kref) idr_destroy(&queue->fence_idr); - drm_sched_entity_destroy(&queue->entity); - msm_file_private_put(queue->ctx); kfree(queue); @@ -61,13 +77,48 @@ void msm_submitqueue_close(struct msm_file_private *ctx) } } +static struct drm_sched_entity * +get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring, + unsigned ring_nr, enum drm_sched_priority sched_prio) +{ + static DEFINE_MUTEX(entity_lock); + unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio; + + /* We should have already validated that the requested priority is + * valid by the time we get here. + */ + if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities))) + return ERR_PTR(-EINVAL); + + mutex_lock(&entity_lock); + + if (!ctx->entities[idx]) { + struct drm_sched_entity *entity; + struct drm_gpu_scheduler *sched = &ring->sched; + int ret; + + entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL); + + ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL); + if (ret) { + mutex_unlock(&entity_lock); + kfree(entity); + return ERR_PTR(ret); + } + + ctx->entities[idx] = entity; + } + + mutex_unlock(&entity_lock); + + return ctx->entities[idx]; +} + int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, u32 prio, u32 flags, u32 *id) { struct msm_drm_private *priv = drm->dev_private; struct msm_gpu_submitqueue *queue; - struct msm_ringbuffer *ring; - struct drm_gpu_scheduler *sched; enum drm_sched_priority sched_prio; unsigned ring_nr; int ret; @@ -91,12 +142,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, queue->flags = flags; queue->ring_nr = ring_nr; - ring = priv->gpu->rb[ring_nr]; - sched = &ring->sched; - - ret = drm_sched_entity_init(&queue->entity, - sched_prio, &sched, 1, NULL); - if (ret) { + queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr], + ring_nr, sched_prio); + if (IS_ERR(queue->entity)) { + ret = PTR_ERR(queue->entity); kfree(queue); return ret; } @@ -140,10 +189,6 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx) */ default_prio = DIV_ROUND_UP(max_priority, 2); - INIT_LIST_HEAD(&ctx->submitqueues); - - rwlock_init(&ctx->queuelock); - return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL); } diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig index ee22cd25d3e3..987170e16ebd 100644 --- a/drivers/gpu/drm/mxsfb/Kconfig +++ b/drivers/gpu/drm/mxsfb/Kconfig @@ -10,7 +10,7 @@ config DRM_MXSFB depends on COMMON_CLK select DRM_MXS select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select 
DRM_GEM_CMA_HELPER select DRM_PANEL select DRM_PANEL_BRIDGE help diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index ec0432fe1bdf..86d78634a979 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm) struct mxsfb_drm_private *mxsfb = drm->dev_private; mxsfb_enable_axi_clk(mxsfb); - mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc); + + /* Disable and clear VBLANK IRQ */ + writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR); + writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR); + mxsfb_disable_axi_clk(mxsfb); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 7739f46470d3..99fee4d8cd31 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -205,7 +205,7 @@ nv04_display_destroy(struct drm_device *dev) nvif_notify_dtor(&disp->flip); nouveau_display(dev)->priv = NULL; - kfree(disp); + vfree(disp); nvif_object_unmap(&drm->client.device.object); } @@ -223,7 +223,7 @@ nv04_display_create(struct drm_device *dev) struct nv04_display *disp; int i, ret; - disp = kzalloc(sizeof(*disp), GFP_KERNEL); + disp = vzalloc(sizeof(*disp)); if (!disp) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild index 4488e1c061b3..28be2912ff74 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild +++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild @@ -13,6 +13,7 @@ nouveau-y += dispnv50/corec57d.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crc907d.o nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc37d.o +nouveau-$(CONFIG_DEBUG_FS) += dispnv50/crcc57d.o nouveau-y += dispnv50/dac507d.o nouveau-y += dispnv50/dac907d.o diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c index 5396e3707cc4..e6b0417c325b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c @@ -103,12 +103,9 @@ base907c_xlut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) return 0; } -static bool +static void base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - if (size != 256 && size != 1024) - return false; - if (size == 1024) asyw->xlut.i.mode = NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE; else @@ -116,7 +113,6 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) asyw->xlut.i.enable = NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE; asyw->xlut.i.load = head907d_olut_load; - return true; } static inline u32 diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c index 75876546eac1..53b1e2a569c1 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c @@ -69,7 +69,7 @@ corec57d = { .head = &headc57d, .sor = &sorc37d, #if IS_ENABLED(CONFIG_DEBUG_FS) - .crc = &crcc37d, + .crc = &crcc57d, #endif }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c index b8c31b697797..29428e770f14 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c @@ -84,7 +84,10 @@ static void nv50_crc_ctx_flip_work(struct kthread_work *base) struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work); struct nv50_head *head = container_of(crc, struct nv50_head, crc); struct 
drm_crtc *crtc = &head->base.base; - struct nv50_disp *disp = nv50_disp(crtc->dev); + struct drm_device *dev = crtc->dev; + struct nv50_disp *disp = nv50_disp(dev); + const uint64_t start_vbl = drm_crtc_vblank_count(crtc); + uint64_t end_vbl; u8 new_idx = crc->ctx_idx ^ 1; /* @@ -92,23 +95,24 @@ static void nv50_crc_ctx_flip_work(struct kthread_work *base) * try again for the next vblank if we don't grab the lock */ if (!mutex_trylock(&disp->mutex)) { - DRM_DEV_DEBUG_KMS(crtc->dev->dev, - "Lock contended, delaying CRC ctx flip for head-%d\n", - head->base.index); - drm_vblank_work_schedule(work, - drm_crtc_vblank_count(crtc) + 1, - true); + drm_dbg_kms(dev, "Lock contended, delaying CRC ctx flip for %s\n", crtc->name); + drm_vblank_work_schedule(work, start_vbl + 1, true); return; } - DRM_DEV_DEBUG_KMS(crtc->dev->dev, - "Flipping notifier ctx for head %d (%d -> %d)\n", - drm_crtc_index(crtc), crc->ctx_idx, new_idx); + drm_dbg_kms(dev, "Flipping notifier ctx for %s (%d -> %d)\n", + crtc->name, crc->ctx_idx, new_idx); nv50_crc_program_ctx(head, NULL); nv50_crc_program_ctx(head, &crc->ctx[new_idx]); mutex_unlock(&disp->mutex); + end_vbl = drm_crtc_vblank_count(crtc); + if (unlikely(end_vbl != start_vbl)) + NV_ERROR(nouveau_drm(dev), + "Failed to flip CRC context on %s on time (%llu > %llu)\n", + crtc->name, end_vbl, start_vbl); + spin_lock_irq(&crc->lock); crc->ctx_changed = true; spin_unlock_irq(&crc->lock); @@ -189,9 +193,9 @@ void nv50_crc_handle_vblank(struct nv50_head *head) * updates back-to-back without waiting, we'll just be * optimistic and assume we always miss exactly one frame. */ - DRM_DEV_DEBUG_KMS(head->base.base.dev->dev, - "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n", - head->base.index, crc->frame); + drm_dbg_kms(head->base.base.dev, + "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n", + head->base.index, crc->frame); crc->frame++; nv50_crc_reset_ctx(ctx); @@ -347,8 +351,6 @@ int nv50_crc_atomic_check_head(struct nv50_head *head, struct nv50_head_atom *armh) { struct nv50_atom *atom = nv50_atom(asyh->state.state); - struct drm_device *dev = head->base.base.dev; - struct nv50_disp *disp = nv50_disp(dev); bool changed = armh->crc.src != asyh->crc.src; if (!armh->crc.src && !asyh->crc.src) { @@ -357,30 +359,7 @@ int nv50_crc_atomic_check_head(struct nv50_head *head, return 0; } - /* While we don't care about entry tags, Volta+ hw always needs the - * controlling wndw channel programmed to a wndw that's owned by our - * head - */ - if (asyh->crc.src && disp->disp->object.oclass >= GV100_DISP && - !(BIT(asyh->crc.wndw) & asyh->wndw.owned)) { - if (!asyh->wndw.owned) { - /* TODO: once we support flexible channel ownership, - * we should write some code here to handle attempting - * to "steal" a plane: e.g. take a plane that is - * currently not-visible and owned by another head, - * and reassign it to this head. If we fail to do so, - * we shuld reject the mode outright as CRC capture - * then becomes impossible. 
- */ - NV_ATOMIC(nouveau_drm(dev), - "No available wndws for CRC readback\n"); - return -EINVAL; - } - asyh->crc.wndw = ffs(asyh->wndw.owned) - 1; - } - - if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed || - armh->crc.wndw != asyh->crc.wndw) { + if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed) { asyh->clr.crc = armh->crc.src && armh->state.active; asyh->set.crc = asyh->crc.src && asyh->state.active; if (changed) @@ -467,9 +446,8 @@ void nv50_crc_atomic_set(struct nv50_head *head, struct nouveau_encoder *outp = nv50_real_outp(nv50_head_atom_get_encoder(asyh)); - func->set_src(head, outp->or, - nv50_crc_source_type(outp, asyh->crc.src), - &crc->ctx[crc->ctx_idx], asyh->crc.wndw); + func->set_src(head, outp->or, nv50_crc_source_type(outp, asyh->crc.src), + &crc->ctx[crc->ctx_idx]); } void nv50_crc_atomic_clr(struct nv50_head *head) @@ -477,7 +455,7 @@ void nv50_crc_atomic_clr(struct nv50_head *head) const struct nv50_crc_func *func = nv50_disp(head->base.base.dev)->core->func->crc; - func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0); + func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL); } static inline int @@ -704,6 +682,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = { .open = nv50_crc_debugfs_flip_threshold_open, .read = seq_read, .write = nv50_crc_debugfs_flip_threshold_set, + .release = single_release, }; int nv50_head_crc_late_register(struct nv50_head *head) diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.h b/drivers/gpu/drm/nouveau/dispnv50/crc.h index 4fce871b04c8..4823f1fde2dd 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc.h +++ b/drivers/gpu/drm/nouveau/dispnv50/crc.h @@ -45,13 +45,11 @@ struct nv50_crc_notifier_ctx { struct nv50_crc_atom { enum nv50_crc_source src; - /* Only used for gv100+ */ - u8 wndw : 4; }; struct nv50_crc_func { - int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type, - struct nv50_crc_notifier_ctx *, u32 wndw); + int (*set_src)(struct nv50_head *, int or, enum nv50_crc_source_type type, + struct nv50_crc_notifier_ctx *ctx); int (*set_ctx)(struct nv50_head *, struct nv50_crc_notifier_ctx *); u32 (*get_entry)(struct nv50_head *, struct nv50_crc_notifier_ctx *, enum nv50_crc_source, int idx); @@ -95,6 +93,7 @@ void nv50_crc_atomic_clr(struct nv50_head *); extern const struct nv50_crc_func crc907d; extern const struct nv50_crc_func crcc37d; +extern const struct nv50_crc_func crcc57d; #else /* IS_ENABLED(CONFIG_DEBUG_FS) */ struct nv50_crc {}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c index 0fb0fdb9f119..f9ad641555b7 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crc907d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crc907d.c @@ -23,9 +23,8 @@ struct crc907d_notifier { } __packed; static int -crc907d_set_src(struct nv50_head *head, int or, - enum nv50_crc_source_type source, - struct nv50_crc_notifier_ctx *ctx, u32 wndw) +crc907d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source, + struct nv50_crc_notifier_ctx *ctx) { struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; const int i = head->base.index; @@ -33,7 +32,8 @@ crc907d_set_src(struct nv50_head *head, int or, NVDEF(NV907D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) | NVDEF(NV907D, HEAD_SET_CRC_CONTROL, TIMESTAMP_MODE, FALSE) | NVDEF(NV907D, HEAD_SET_CRC_CONTROL, SECONDARY_OUTPUT, NONE) | - NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE); + NVDEF(NV907D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE) | + 
NVDEF(NV907D, HEAD_SET_CRC_CONTROL, WIDE_PIPE_CRC, ENABLE); int ret; switch (source) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c index 814e5bd97446..f10f6c484408 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c @@ -2,6 +2,7 @@ #include <drm/drm_crtc.h> #include "crc.h" +#include "crcc37d.h" #include "core.h" #include "disp.h" #include "head.h" @@ -10,38 +11,13 @@ #include <nvhw/class/clc37d.h> -#define CRCC37D_MAX_ENTRIES 2047 - -struct crcc37d_notifier { - u32 status; - - /* reserved */ - u32 :32; - u32 :32; - u32 :32; - u32 :32; - u32 :32; - u32 :32; - u32 :32; - - struct crcc37d_entry { - u32 status[2]; - u32 :32; /* reserved */ - u32 compositor_crc; - u32 rg_crc; - u32 output_crc[2]; - u32 :32; /* reserved */ - } entries[CRCC37D_MAX_ENTRIES]; -} __packed; - static int -crcc37d_set_src(struct nv50_head *head, int or, - enum nv50_crc_source_type source, - struct nv50_crc_notifier_ctx *ctx, u32 wndw) +crcc37d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source, + struct nv50_crc_notifier_ctx *ctx) { struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; const int i = head->base.index; - u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, wndw) | + u32 crc_args = NVVAL(NVC37D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, i * 4) | NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) | NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) | NVDEF(NVC37D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE); @@ -75,8 +51,7 @@ crcc37d_set_src(struct nv50_head *head, int or, return 0; } -static int -crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) +int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) { struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; const int i = head->base.index; @@ -89,9 +64,8 @@ crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) return 0; } -static u32 crcc37d_get_entry(struct nv50_head *head, - struct nv50_crc_notifier_ctx *ctx, - enum nv50_crc_source source, int idx) +u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx, + enum nv50_crc_source source, int idx) { struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr; struct crcc37d_entry __iomem *entry = ¬ifier->entries[idx]; @@ -105,8 +79,7 @@ static u32 crcc37d_get_entry(struct nv50_head *head, return ioread32_native(crc_addr); } -static bool crcc37d_ctx_finished(struct nv50_head *head, - struct nv50_crc_notifier_ctx *ctx) +bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx) { struct nouveau_drm *drm = nouveau_drm(head->base.base.dev); struct crcc37d_notifier __iomem *notifier = ctx->mem.object.map.ptr; @@ -148,7 +121,7 @@ const struct nv50_crc_func crcc37d = { .set_ctx = crcc37d_set_ctx, .get_entry = crcc37d_get_entry, .ctx_finished = crcc37d_ctx_finished, - .flip_threshold = CRCC37D_MAX_ENTRIES - 30, + .flip_threshold = CRCC37D_FLIP_THRESHOLD, .num_entries = CRCC37D_MAX_ENTRIES, .notifier_len = sizeof(struct crcc37d_notifier), }; diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h new file mode 100644 index 000000000000..5775137b832d --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef __CRCC37D_H__ +#define __CRCC37D_H__ + +#include 
<linux/types.h> + +#include "crc.h" + +#define CRCC37D_MAX_ENTRIES 2047 +#define CRCC37D_FLIP_THRESHOLD (CRCC37D_MAX_ENTRIES - 30) + +struct crcc37d_notifier { + u32 status; + + /* reserved */ + u32:32; + u32:32; + u32:32; + u32:32; + u32:32; + u32:32; + u32:32; + + struct crcc37d_entry { + u32 status[2]; + u32:32; /* reserved */ + u32 compositor_crc; + u32 rg_crc; + u32 output_crc[2]; + u32:32; /* reserved */ + } entries[CRCC37D_MAX_ENTRIES]; +} __packed; + +int crcc37d_set_ctx(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx); +u32 crcc37d_get_entry(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx, + enum nv50_crc_source source, int idx); +bool crcc37d_ctx_finished(struct nv50_head *head, struct nv50_crc_notifier_ctx *ctx); + +#endif /* !__CRCC37D_H__ */ diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c new file mode 100644 index 000000000000..cc0130e3d496 --- /dev/null +++ b/drivers/gpu/drm/nouveau/dispnv50/crcc57d.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: MIT + +#include "crc.h" +#include "crcc37d.h" +#include "core.h" +#include "disp.h" +#include "head.h" + +#include <nvif/pushc37b.h> + +#include <nvhw/class/clc57d.h> + +static int crcc57d_set_src(struct nv50_head *head, int or, enum nv50_crc_source_type source, + struct nv50_crc_notifier_ctx *ctx) +{ + struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; + const int i = head->base.index; + u32 crc_args = NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CONTROLLING_CHANNEL, CORE) | + NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, EXPECT_BUFFER_COLLAPSE, FALSE) | + NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, SECONDARY_CRC, NONE) | + NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, CRC_DURING_SNOOZE, DISABLE); + int ret; + + switch (source) { + case NV50_CRC_SOURCE_TYPE_SOR: + crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SOR(or)); + break; + case NV50_CRC_SOURCE_TYPE_SF: + crc_args |= NVDEF(NVC57D, HEAD_SET_CRC_CONTROL, PRIMARY_CRC, SF); + break; + default: + break; + } + + ret = PUSH_WAIT(push, 4); + if (ret) + return ret; + + if (source) { + PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), ctx->ntfy.handle); + PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), crc_args); + } else { + PUSH_MTHD(push, NVC57D, HEAD_SET_CRC_CONTROL(i), 0); + PUSH_MTHD(push, NVC57D, HEAD_SET_CONTEXT_DMA_CRC(i), 0); + } + + return 0; +} + +const struct nv50_crc_func crcc57d = { + .set_src = crcc57d_set_src, + .set_ctx = crcc37d_set_ctx, + .get_entry = crcc37d_get_entry, + .ctx_finished = crcc37d_ctx_finished, + .flip_threshold = CRCC37D_FLIP_THRESHOLD, + .num_entries = CRCC37D_MAX_ENTRIES, + .notifier_len = sizeof(struct crcc37d_notifier), +}; diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c index 54fbd6fe751d..00e19fd959ea 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c @@ -98,6 +98,7 @@ static int curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, struct nv50_head_atom *asyh) { + struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); struct nv50_head *head = nv50_head(asyw->state.crtc); int ret; @@ -109,8 +110,20 @@ curs507a_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, if (ret || !asyh->curs.visible) return ret; - if (asyw->image.w != asyw->image.h) + if (asyw->state.crtc_w != asyw->state.crtc_h) { + NV_ATOMIC(drm, "Plane width/height must be equal for cursors\n"); return -EINVAL; + } + + if (asyw->image.w != asyw->state.crtc_w) { + 
NV_ATOMIC(drm, "Plane width must be equal to fb width for cursors (height can be larger though)\n"); + return -EINVAL; + } + + if (asyw->state.src_x || asyw->state.src_y) { + NV_ATOMIC(drm, "Cursor planes do not support framebuffer offsets\n"); + return -EINVAL; + } ret = head->func->curs_layout(head, asyw, asyh); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index d7b9f7f8c9e3..ae1f41205520 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -852,6 +852,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, &nv_connector->base, mode); if (!ret) { + drm_hdmi_avi_infoframe_quant_range(&avi_frame.avi, + &nv_connector->base, mode, + HDMI_QUANTIZATION_RANGE_FULL); /* We have an AVI InfoFrame, populate it to the display */ args.pwr.avi_infoframe_length = hdmi_infoframe_pack(&avi_frame, args.infoframes, 17); @@ -1387,12 +1390,11 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm) { struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); struct drm_encoder *encoder; - int ret; NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name); - ret = drm_dp_check_act_status(&mstm->mgr); + drm_dp_check_act_status(&mstm->mgr); - ret = drm_dp_update_payload_part2(&mstm->mgr); + drm_dp_update_payload_part2(&mstm->mgr); drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { @@ -1411,10 +1413,9 @@ nv50_mstm_prepare(struct nv50_mstm *mstm) { struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); struct drm_encoder *encoder; - int ret; NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name); - ret = drm_dp_update_payload_part1(&mstm->mgr); + drm_dp_update_payload_part1(&mstm->mgr, 1); drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index d66f97280282..c3c57be54e1c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -52,6 +52,7 @@ nv50_head_flush_clr(struct nv50_head *head, void nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh) { + if (asyh->set.curs ) head->func->curs_set(head, asyh); if (asyh->set.olut ) { asyh->olut.offset = nv50_lut_load(&head->olut, asyh->olut.buffer, @@ -67,7 +68,6 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) if (asyh->set.view ) head->func->view (head, asyh); if (asyh->set.mode ) head->func->mode (head, asyh); if (asyh->set.core ) head->func->core_set(head, asyh); - if (asyh->set.curs ) head->func->curs_set(head, asyh); if (asyh->set.base ) head->func->base (head, asyh); if (asyh->set.ovly ) head->func->ovly (head, asyh); if (asyh->set.dither ) head->func->dither (head, asyh); @@ -226,10 +226,24 @@ static int nv50_head_atomic_check_lut(struct nv50_head *head, struct nv50_head_atom *asyh) { - struct nv50_disp *disp = nv50_disp(head->base.base.dev); - struct drm_property_blob *olut = asyh->state.gamma_lut; + struct drm_device *dev = head->base.base.dev; + struct drm_crtc *crtc = &head->base.base; + struct nv50_disp *disp = nv50_disp(dev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct drm_property_blob *olut = asyh->state.gamma_lut, + *ilut = asyh->state.degamma_lut; int size; + /* Ensure that the ilut is valid */ + if (ilut) { + size = 
drm_color_lut_size(ilut); + if (!head->func->ilut_check(size)) { + NV_ATOMIC(drm, "Invalid size %d for degamma on [CRTC:%d:%s]\n", + size, crtc->base.id, crtc->name); + return -EINVAL; + } + } + /* Determine whether core output LUT should be enabled. */ if (olut) { /* Check if any window(s) have stolen the core output LUT @@ -256,7 +270,8 @@ nv50_head_atomic_check_lut(struct nv50_head *head, } if (!head->func->olut(head, asyh, size)) { - DRM_DEBUG_KMS("Invalid olut\n"); + NV_ATOMIC(drm, "Invalid size %d for gamma on [CRTC:%d:%s]\n", + size, crtc->base.id, crtc->name); return -EINVAL; } asyh->olut.handle = disp->core->chan.vram.handle; @@ -330,8 +345,17 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) struct drm_connector_state *conns; struct drm_connector *conn; int i, ret; + bool check_lut = asyh->state.color_mgmt_changed || + memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw)); NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active); + + if (check_lut) { + ret = nv50_head_atomic_check_lut(head, asyh); + if (ret) + return ret; + } + if (asyh->state.active) { for_each_new_connector_in_state(asyh->state.state, conn, conns, i) { if (conns->crtc == crtc) { @@ -357,14 +381,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) if (asyh->state.mode_changed || asyh->state.connectors_changed) nv50_head_atomic_check_mode(head, asyh); - if (asyh->state.color_mgmt_changed || - memcmp(&armh->wndw, &asyh->wndw, sizeof(asyh->wndw))) { - int ret = nv50_head_atomic_check_lut(head, asyh); - if (ret) - return ret; - + if (check_lut) asyh->olut.visible = asyh->olut.handle != 0; - } if (asyc) { if (asyc->set.scaler) diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h index 0bac6be9ba34..41c8788dfb31 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.h +++ b/drivers/gpu/drm/nouveau/dispnv50/head.h @@ -29,6 +29,7 @@ struct nv50_head_func { int (*view)(struct nv50_head *, struct nv50_head_atom *); int (*mode)(struct nv50_head *, struct nv50_head_atom *); bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int); + bool (*ilut_check)(int size); bool olut_identity; int olut_size; int (*olut_set)(struct nv50_head *, struct nv50_head_atom *); @@ -71,6 +72,7 @@ extern const struct nv50_head_func head907d; int head907d_view(struct nv50_head *, struct nv50_head_atom *); int head907d_mode(struct nv50_head *, struct nv50_head_atom *); bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int); +bool head907d_ilut_check(int size); int head907d_olut_set(struct nv50_head *, struct nv50_head_atom *); int head907d_olut_clr(struct nv50_head *); int head907d_core_set(struct nv50_head *, struct nv50_head_atom *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c index 85648d790743..18fe4c1e2d6a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c @@ -314,6 +314,11 @@ head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) return true; } +bool head907d_ilut_check(int size) +{ + return size == 256 || size == 1024; +} + int head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh) { @@ -409,6 +414,7 @@ head907d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .ilut_check = head907d_ilut_check, .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c 
b/drivers/gpu/drm/nouveau/dispnv50/head917d.c index ea9f8667305e..4ce47b55f72c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c @@ -119,6 +119,7 @@ head917d = { .view = head907d_view, .mode = head907d_mode, .olut = head907d_olut, + .ilut_check = head907d_ilut_check, .olut_size = 1024, .olut_set = head907d_olut_set, .olut_clr = head907d_olut_clr, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c index 63adfeba50e5..a4a3b78ea42c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c @@ -285,6 +285,7 @@ headc37d = { .view = headc37d_view, .mode = headc37d_mode, .olut = headc37d_olut, + .ilut_check = head907d_ilut_check, .olut_size = 1024, .olut_set = headc37d_olut_set, .olut_clr = headc37d_olut_clr, diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c index fd51527b56b8..543f08ceaad6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c @@ -169,7 +169,7 @@ headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -bool +static bool headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh, int size) { if (size != 0 && size != 256 && size != 1024) @@ -236,6 +236,7 @@ headc57d = { .view = headc37d_view, .mode = headc57d_mode, .olut = headc57d_olut, + .ilut_check = head907d_ilut_check, .olut_identity = true, .olut_size = 1024, .olut_set = headc57d_olut_set, diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 8d048bacd6f0..133c8736426a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -403,10 +403,7 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw, /* Recalculate LUT state. */ memset(&asyw->xlut, 0x00, sizeof(asyw->xlut)); if ((asyw->ilut = wndw->func->ilut ? 
ilut : NULL)) { - if (!wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut))) { - DRM_DEBUG_KMS("Invalid ilut\n"); - return -EINVAL; - } + wndw->func->ilut(wndw, asyw, drm_color_lut_size(ilut)); asyw->xlut.handle = wndw->wndw.vram.handle; asyw->xlut.i.buffer = !asyw->xlut.i.buffer; asyw->set.xlut = true; @@ -539,6 +536,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) struct nouveau_bo *nvbo; struct nv50_head_atom *asyh; struct nv50_wndw_ctxdma *ctxdma; + struct dma_resv_iter cursor; + struct dma_fence *fence; int ret; NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb); @@ -561,7 +560,13 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) asyw->image.handle[0] = ctxdma->object.handle; } - asyw->state.fence = dma_resv_get_excl_unlocked(nvbo->bo.base.resv); + dma_resv_iter_begin(&cursor, nvbo->bo.base.resv, false); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + /* TODO: We only use the first writer here */ + asyw->state.fence = dma_fence_get(fence); + break; + } + dma_resv_iter_end(&cursor); asyw->image.offset[0] = nvbo->offset; if (wndw->func->prepare) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index f4e0c5080034..9c9f2c2a71a5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -64,7 +64,7 @@ struct nv50_wndw_func { int (*ntfy_clr)(struct nv50_wndw *); int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset, struct nvif_device *); - bool (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *, int); + void (*ilut)(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyh, int size); void (*csc)(struct nv50_wndw *, struct nv50_wndw_atom *, const struct drm_color_ctm *); int (*csc_set)(struct nv50_wndw *, struct nv50_wndw_atom *); @@ -129,7 +129,7 @@ int wndwc37e_update(struct nv50_wndw *, u32 *); int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32, struct nv50_wndw **); -bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int); +void wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int); int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *); int wndwc57e_ilut_clr(struct nv50_wndw *); int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *); diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c index 57df997c5ff3..183d2c0e65b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c @@ -82,18 +82,14 @@ wndwc37e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) return 0; } -static bool +static void wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - if (size != 256 && size != 1024) - return false; - asyw->xlut.i.size = size == 1024 ? 
NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_1025 : NVC37E_SET_CONTROL_INPUT_LUT_SIZE_SIZE_257; asyw->xlut.i.range = NVC37E_SET_CONTROL_INPUT_LUT_RANGE_UNITY; asyw->xlut.i.output_mode = NVC37E_SET_CONTROL_INPUT_LUT_OUTPUT_MODE_INTERPOLATE; asyw->xlut.i.load = head907d_olut_load; - return true; } int diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c index abdd3bb658b3..37f6da8b3f2a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c @@ -179,11 +179,11 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem) writew(readw(mem - 4), mem + 4); } -bool +void wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) { - if (size = size ? size : 1024, size != 256 && size != 1024) - return false; + if (!size) + size = 1024; if (size == 256) asyw->xlut.i.mode = NVC57E_SET_ILUT_CONTROL_MODE_DIRECT8; @@ -193,7 +193,6 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size) asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */; asyw->xlut.i.output_mode = NVC57E_SET_ILUT_CONTROL_INTERPOLATE_DISABLE; asyw->xlut.i.load = wndwc57e_ilut_load; - return true; } /**************************************************************** diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h index 79aff6ff3138..f972ef1409f4 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl907d.h @@ -246,6 +246,9 @@ #define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 5:5 #define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) #define NV907D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) +#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC 6:6 +#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_DISABLE (0x00000000) +#define NV907D_HEAD_SET_CRC_CONTROL_WIDE_PIPE_CRC_ENABLE (0x00000001) #define NV907D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00000438 + (a)*0x00000300) #define NV907D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 #define NV907D_HEAD_SET_OUTPUT_LUT_LO(a) (0x00000448 + (a)*0x00000300) diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h index d83ac815e06c..d4bad2da3e56 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/clc57d.h @@ -265,6 +265,75 @@ #define NVC57D_HEAD_SET_RASTER_BLANK_START(a) (0x00002070 + (a)*0x00000400) #define NVC57D_HEAD_SET_RASTER_BLANK_START_X 14:0 #define NVC57D_HEAD_SET_RASTER_BLANK_START_Y 30:16 +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC(a) (0x00002180 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CONTEXT_DMA_CRC_HANDLE 31:0 +#define NVC57D_HEAD_SET_CRC_CONTROL(a) (0x00002184 + (a)*0x00000400) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL 5:0 +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_0 (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_1 (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_2 (0x00000002) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_3 (0x00000003) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_4 (0x00000004) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_5 (0x00000005) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_6 (0x00000006) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_7 (0x00000007) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_8 (0x00000008) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_9 (0x00000009) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_10 (0x0000000A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_11 (0x0000000B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_12 (0x0000000C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_13 (0x0000000D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_14 (0x0000000E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_15 (0x0000000F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_16 (0x00000010) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_17 (0x00000011) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_18 (0x00000012) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_19 (0x00000013) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_20 (0x00000014) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_21 (0x00000015) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_22 (0x00000016) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_23 (0x00000017) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_24 (0x00000018) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_25 (0x00000019) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_26 (0x0000001A) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_27 (0x0000001B) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_28 (0x0000001C) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_29 (0x0000001D) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_30 (0x0000001E) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_WIN_31 (0x0000001F) +#define NVC57D_HEAD_SET_CRC_CONTROL_CONTROLLING_CHANNEL_CORE (0x00000020) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE 8:8 +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_FALSE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_EXPECT_BUFFER_COLLAPSE_TRUE (0x00000001) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC 19:12 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR1 (0x00000051) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR2 (0x00000052) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_PRIMARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC 27:20 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_NONE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SF (0x00000030) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR(i) (0x00000050 +(i)) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR__SIZE_1 8 +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR0 (0x00000050) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR1 (0x00000051) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR2 (0x00000052) +#define 
NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR3 (0x00000053) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR4 (0x00000054) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR5 (0x00000055) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR6 (0x00000056) +#define NVC57D_HEAD_SET_CRC_CONTROL_SECONDARY_CRC_SOR7 (0x00000057) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE 9:9 +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_DISABLE (0x00000000) +#define NVC57D_HEAD_SET_CRC_CONTROL_CRC_DURING_SNOOZE_ENABLE (0x00000001) #define NVC57D_HEAD_SET_OLUT_CONTROL(a) (0x00002280 + (a)*0x00000400) #define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE 0:0 #define NVC57D_HEAD_SET_OLUT_CONTROL_INTERPOLATE_DISABLE (0x00000000) diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index c68cc957248e..a582c0cb0cb0 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -71,6 +71,7 @@ #define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f #define VOLTA_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c36f #define TURING_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c46f +#define AMPERE_CHANNEL_GPFIFO_B /* clc36f.h */ 0x0000c76f #define NV50_DISP /* cl5070.h */ 0x00005070 #define G82_DISP /* cl5070.h */ 0x00008270 @@ -200,6 +201,7 @@ #define PASCAL_DMA_COPY_B 0x0000c1b5 #define VOLTA_DMA_COPY_A 0x0000c3b5 #define TURING_DMA_COPY_A 0x0000c5b5 +#define AMPERE_DMA_COPY_B 0x0000c7b5 #define FERMI_DECOMPRESS 0x000090b8 diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index 54fab7cc36c1..64ee82c7c1be 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -77,4 +77,5 @@ int gp100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct int gp10b_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); int gv100_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); int tu102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); +int ga102_fifo_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fifo **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 7c15f6448428..6140db756d06 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -364,7 +364,6 @@ void * nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { struct acpi_device *acpidev; - acpi_handle handle; int type, ret; void *edid; @@ -377,12 +376,8 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) return NULL; } - handle = ACPI_HANDLE(dev->dev); - if (!handle) - return NULL; - - ret = acpi_bus_get_device(handle, &acpidev); - if (ret) + acpidev = ACPI_COMPANION(dev->dev); + if (!acpidev) return NULL; ret = acpi_video_get_edid(acpidev, type, -1, &edid); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 1cbd71abc80a..ae2f2abc8f5a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -308,7 +308,10 @@ nv50_backlight_init(struct nouveau_backlight *bl, if (ret < 0) return ret; - if (drm_edp_backlight_supported(edp_dpcd)) { + /* TODO: Add support for hybrid PWM/DPCD panels */ + if (drm_edp_backlight_supported(edp_dpcd) && + (edp_dpcd[1] & 
DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && + (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP)) { NV_DEBUG(drm, "DPCD backlight controls supported on %s\n", nv_conn->base.name); diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index e8c445eb1100..41b78e9ecd4e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -2045,7 +2045,6 @@ nouveau_run_vbios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; - int ret = 0; /* Reset the BIOS head to 0. */ bios->state.crtchead = 0; @@ -2058,7 +2057,7 @@ nouveau_run_vbios_init(struct drm_device *dev) bios->fp.lvds_init_run = false; } - return ret; + return 0; } static bool diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 17a0a3ece485..fa73fe57f97b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -844,6 +844,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_resource *, struct ttm_resource *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { + { "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init }, { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init }, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 80099ef75702..ea7769135b0d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -250,7 +250,8 @@ static int nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, u64 runlist, bool priv, struct nouveau_channel **pchan) { - static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A, + static const u16 oclasses[] = { AMPERE_CHANNEL_GPFIFO_B, + TURING_CHANNEL_GPFIFO_A, VOLTA_CHANNEL_GPFIFO_A, PASCAL_CHANNEL_GPFIFO_A, MAXWELL_CHANNEL_GPFIFO_A, @@ -386,7 +387,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) nvif_object_map(&chan->user, NULL, 0); - if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) { + if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO && + chan->user.oclass < AMPERE_CHANNEL_GPFIFO_B) { ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled", nouveau_channel_killed, true, NV906F_V0_NTFY_KILLED, diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index c2bc05eb2e54..1cbe01048b93 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -207,6 +207,7 @@ static const struct file_operations nouveau_pstate_fops = { .open = nouveau_debugfs_pstate_open, .read = seq_read, .write = nouveau_debugfs_pstate_set, + .release = single_release, }; static struct drm_info_list nouveau_debugfs_list[] = { diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 929de41c281f..2b460835a438 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -306,7 +306,7 @@ nouveau_framebuffer_new(struct drm_device *dev, struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct drm_framebuffer *fb; const struct drm_format_info *info; - unsigned int width, height, i; + unsigned int height, i; uint32_t tile_mode; uint8_t kind; int ret; @@ -343,9 +343,6 @@ nouveau_framebuffer_new(struct drm_device *dev, info = drm_get_format_info(dev, mode_cmd); for (i = 0; i < 
info->num_planes; i++) { - width = drm_format_info_plane_width(info, - mode_cmd->width, - i); height = drm_format_info_plane_height(info, mode_cmd->height, i); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 92987daa5e17..3828aafd3ac4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm, goto error_dma_unmap; mutex_unlock(&svmm->mutex); - args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + args->dst[0] = migrate_pfn(page_to_pfn(dpage)); return 0; error_dma_unmap: @@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm, ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT); if (src & MIGRATE_PFN_WRITE) *pfn |= NVIF_VMM_PFNMAP_V0_W; - return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED; + return migrate_pfn(page_to_pfn(dpage)); out_dma_unmap: dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 1f828c9f691c..561309d447e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,7 +22,6 @@ * Authors: Ben Skeggs */ -#include <linux/console.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> @@ -32,6 +31,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_vblank.h> @@ -345,6 +345,9 @@ nouveau_accel_gr_init(struct nouveau_drm *drm) u32 arg0, arg1; int ret; + if (device->info.family >= NV_DEVICE_INFO_V0_AMPERE) + return; + /* Allocate channel that has access to the graphics engine. */ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { arg0 = nvif_fifo_runlist(device, NV_DEVICE_HOST_RUNLIST_ENGINES_GR); @@ -469,6 +472,7 @@ nouveau_accel_init(struct nouveau_drm *drm) case PASCAL_CHANNEL_GPFIFO_A: case VOLTA_CHANNEL_GPFIFO_A: case TURING_CHANNEL_GPFIFO_A: + case AMPERE_CHANNEL_GPFIFO_B: ret = nvc0_fence_create(drm); break; default: @@ -558,6 +562,7 @@ nouveau_drm_device_init(struct drm_device *dev) nvkm_dbgopt(nouveau_debug, "DRM"); INIT_LIST_HEAD(&drm->clients); + mutex_init(&drm->clients_lock); spin_lock_init(&drm->tile.lock); /* workaround an odd issue on nvc1 by disabling the device's @@ -628,6 +633,7 @@ fail_alloc: static void nouveau_drm_device_fini(struct drm_device *dev) { + struct nouveau_cli *cli, *temp_cli; struct nouveau_drm *drm = nouveau_drm(dev); if (nouveau_pmops_runtime()) { @@ -652,9 +658,28 @@ nouveau_drm_device_fini(struct drm_device *dev) nouveau_ttm_fini(drm); nouveau_vga_fini(drm); + /* + * There may be existing clients from as-yet unclosed files. For now, + * clean them up here rather than deferring until the file is closed, + * but this likely not correct if we want to support hot-unplugging + * properly. 
+ */ + mutex_lock(&drm->clients_lock); + list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) { + list_del(&cli->head); + mutex_lock(&cli->mutex); + if (cli->abi16) + nouveau_abi16_fini(cli->abi16); + mutex_unlock(&cli->mutex); + nouveau_cli_fini(cli); + kfree(cli); + } + mutex_unlock(&drm->clients_lock); + nouveau_cli_fini(&drm->client); nouveau_cli_fini(&drm->master); nvif_parent_dtor(&drm->parent); + mutex_destroy(&drm->clients_lock); kfree(drm); } @@ -792,7 +817,7 @@ nouveau_drm_device_remove(struct drm_device *dev) struct nvkm_client *client; struct nvkm_device *device; - drm_dev_unregister(dev); + drm_dev_unplug(dev); client = nvxx_client(&drm->client.base); device = nvkm_device_find(client->device); @@ -1086,9 +1111,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) fpriv->driver_priv = cli; - mutex_lock(&drm->client.mutex); + mutex_lock(&drm->clients_lock); list_add(&cli->head, &drm->clients); - mutex_unlock(&drm->client.mutex); + mutex_unlock(&drm->clients_lock); done: if (ret && cli) { @@ -1106,6 +1131,16 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) { struct nouveau_cli *cli = nouveau_cli(fpriv); struct nouveau_drm *drm = nouveau_drm(dev); + int dev_index; + + /* + * The device is gone, and as it currently stands all clients are + * cleaned up in the removal codepath. In the future this may change + * so that we can support hot-unplugging, but for now we immediately + * return to avoid a double-free situation. + */ + if (!drm_dev_enter(dev, &dev_index)) + return; pm_runtime_get_sync(dev->dev); @@ -1114,14 +1149,15 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) nouveau_abi16_fini(cli->abi16); mutex_unlock(&cli->mutex); - mutex_lock(&drm->client.mutex); + mutex_lock(&drm->clients_lock); list_del(&cli->head); - mutex_unlock(&drm->client.mutex); + mutex_unlock(&drm->clients_lock); nouveau_cli_fini(cli); kfree(cli); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); + drm_dev_exit(dev_index); } static const struct drm_ioctl_desc @@ -1322,7 +1358,7 @@ nouveau_drm_init(void) nouveau_display_options(); if (nouveau_modeset == -1) { - if (vgacon_text_force()) + if (drm_firmware_drivers_only()) nouveau_modeset = 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ba65f136cf48..b2a970aa9bf4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -139,6 +139,11 @@ struct nouveau_drm { struct list_head clients; + /** + * @clients_lock: Protects access to the @clients list of &struct nouveau_cli. 
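The hunks above switch device teardown from drm_dev_unregister() to drm_dev_unplug() and guard nouveau_drm_postclose() with drm_dev_enter()/drm_dev_exit(). As a hedged sketch of that guard pattern in isolation, with example_file_teardown() as an illustrative name rather than anything in this patch:

#include <drm/drm_drv.h>

static void example_file_teardown(struct drm_device *dev)
{
	int idx;

	/* drm_dev_enter() fails once drm_dev_unplug() has been called. */
	if (!drm_dev_enter(dev, &idx))
		return;	/* device already gone; the removal path cleaned up */

	/* ... per-file teardown that may touch device state ... */

	drm_dev_exit(idx);
}

Code inside the enter/exit section may assume the device has not been freed, which is why the postclose path above brackets its client cleanup this way.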
+ */ + struct mutex clients_lock; + u8 old_pm_cap; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 05d0b3eb3690..26f9299df881 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -339,14 +339,15 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) } int -nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr) +nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, + bool exclusive, bool intr) { struct nouveau_fence_chan *fctx = chan->fence; - struct dma_fence *fence; struct dma_resv *resv = nvbo->bo.base.resv; - struct dma_resv_list *fobj; + struct dma_resv_iter cursor; + struct dma_fence *fence; struct nouveau_fence *f; - int ret = 0, i; + int ret; if (!exclusive) { ret = dma_resv_reserve_shared(resv, 1); @@ -355,10 +356,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e return ret; } - fobj = dma_resv_shared_list(resv); - fence = dma_resv_excl_fence(resv); - - if (fence) { + dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { struct nouveau_channel *prev = NULL; bool must_wait = true; @@ -366,41 +364,19 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e if (f) { rcu_read_lock(); prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) + if (prev && (prev == chan || + fctx->sync(f, prev, chan) == 0)) must_wait = false; rcu_read_unlock(); } - if (must_wait) + if (must_wait) { ret = dma_fence_wait(fence, intr); - - return ret; - } - - if (!exclusive || !fobj) - return ret; - - for (i = 0; i < fobj->shared_count && !ret; ++i) { - struct nouveau_channel *prev = NULL; - bool must_wait = true; - - fence = rcu_dereference_protected(fobj->shared[i], - dma_resv_held(resv)); - - f = nouveau_local_fence(fence, chan->drm); - if (f) { - rcu_read_lock(); - prev = rcu_dereference(f->channel); - if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0)) - must_wait = false; - rcu_read_unlock(); + if (ret) + return ret; } - - if (must_wait) - ret = dma_fence_wait(fence, intr); } - - return ret; + return 0; } void diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index beeafb3bb266..9416bee92141 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -247,10 +247,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, } ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL); - if (ret) { - nouveau_bo_ref(NULL, &nvbo); + if (ret) return ret; - } /* we restrict allowed domains on nv50+ to only the types * that were requested at creation time. 
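The nouveau_fence_sync() rewrite above replaces the open-coded walk over the exclusive fence plus the shared fence list with the dma_resv iterator API. A minimal sketch of the locked iterator form, assuming the reservation object is already locked and using handle_fence() as a stand-in for the per-fence work:

#include <linux/dma-resv.h>

static int example_walk_fences(struct dma_resv *resv, bool all_fences,
			       int (*handle_fence)(struct dma_fence *))
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* all_fences == false visits only the exclusive (write) fence. */
	dma_resv_for_each_fence(&cursor, resv, all_fences, fence) {
		int ret = handle_fence(fence);

		if (ret)
			return ret;	/* early return mirrors the hunk above */
	}
	return 0;
}

Where the reservation lock is not held, the unlocked variant seen earlier in nv50_wndw_prepare_fb() applies instead: dma_resv_iter_begin(), dma_resv_for_each_fence_unlocked(), dma_resv_iter_end().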
not possibly on diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index b0c3422cb01f..266809e511e2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -162,10 +162,14 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, */ mm = get_task_mm(current); + if (!mm) { + return -EINVAL; + } mmap_read_lock(mm); if (!cli->svm.svmm) { mmap_read_unlock(mm); + mmput(mm); return -EINVAL; } @@ -992,7 +996,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id) if (ret) return ret; - buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL); + buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL); if (!buffer->fault) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 7c9c928c3196..c3526a8622e3 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -204,7 +204,7 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv84_fence_context_new; priv->base.context_del = nv84_fence_context_del; - priv->base.uevent = true; + priv->base.uevent = drm->client.device.info.family < NV_DEVICE_INFO_V0_AMPERE; mutex_init(&priv->mutex); diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c index ac671202919e..0c8c55c73b12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/client.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c @@ -60,7 +60,7 @@ nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, return 0; } -const struct nvkm_sclass +static const struct nvkm_sclass nvkm_uclient_sclass = { .oclass = NVIF_CLASS_CLIENT, .minver = 0, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c index 704df0f2d1f1..09a112af2f89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c @@ -78,6 +78,6 @@ int gt215_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine) { - return nvkm_falcon_new_(>215_ce, device, type, inst, + return nvkm_falcon_new_(>215_ce, device, type, -1, (device->chipset != 0xaf), 0x104000, pengine); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 93ddf63d1114..88d262ba648c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2602,6 +2602,7 @@ nv172_chipset = { .top = { 0x00000001, ga100_top_new }, .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, }; static const struct nvkm_device_chip @@ -2622,6 +2623,28 @@ nv174_chipset = { .top = { 0x00000001, ga100_top_new }, .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip +nv176_chipset = { + .name = "GA106", + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new 
}, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, }; static const struct nvkm_device_chip @@ -2642,6 +2665,7 @@ nv177_chipset = { .top = { 0x00000001, ga100_top_new }, .disp = { 0x00000001, ga102_disp_new }, .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, }; static int @@ -3069,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; case 0x172: device->chip = &nv172_chipset; break; case 0x174: device->chip = &nv174_chipset; break; + case 0x176: device->chip = &nv176_chipset; break; case 0x177: device->chip = &nv177_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { @@ -3144,8 +3169,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, WARN_ON(device->chip->ptr.inst & ~((1 << ARRAY_SIZE(device->ptr)) - 1)); \ for (j = 0; device->chip->ptr.inst && j < ARRAY_SIZE(device->ptr); j++) { \ if ((device->chip->ptr.inst & BIT(j)) && (subdev_mask & BIT_ULL(type))) { \ - int inst = (device->chip->ptr.inst == 1) ? -1 : (j); \ - ret = device->chip->ptr.ctor(device, (type), inst, &device->ptr[j]); \ + ret = device->chip->ptr.ctor(device, (type), (j), &device->ptr[j]); \ subdev = nvkm_device_subdev(device, (type), (j)); \ if (ret) { \ nvkm_subdev_del(&subdev); \ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c index b0ece71aefde..ce774579c89d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c @@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size) args->v0.count = 0; args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE; - args->v0.pwrsrc = -ENOSYS; + args->v0.pwrsrc = -ENODEV; args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index f28894fdede9..113ddc103ac2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -161,8 +161,8 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) if (imem && args->v0.ram_size > 0) args->v0.ram_user = args->v0.ram_user - imem->reserved; - strncpy(args->v0.chip, device->chip->name, sizeof(args->v0.chip)); - strncpy(args->v0.name, device->name, sizeof(args->v0.name)); + snprintf(args->v0.chip, sizeof(args->v0.chip), "%s", device->chip->name); + snprintf(args->v0.name, sizeof(args->v0.name), "%s", device->name); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c index e20a48f201f6..448a515057c7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregv100.c @@ -106,6 +106,8 @@ gv100_disp_core_mthd_head = { { 0x20a4, 0x6820a4 }, { 0x20a8, 0x6820a8 }, { 0x20ac, 0x6820ac }, + { 0x2180, 0x682180 }, + { 0x2184, 0x682184 }, { 0x218c, 0x68218c }, { 0x2194, 0x682194 }, { 0x2198, 0x682198 }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c index 6e3c450eaace..3ff49344abc7 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c @@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header); nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low); nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high); - nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 3209eb7af65f..5e831d347a95 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/fifo/gp100.o nvkm-y += nvkm/engine/fifo/gp10b.o nvkm-y += nvkm/engine/fifo/gv100.o nvkm-y += nvkm/engine/fifo/tu102.o +nvkm-y += nvkm/engine/fifo/ga102.o nvkm-y += nvkm/engine/fifo/chan.o nvkm-y += nvkm/engine/fifo/channv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c index 353b77d9b3dc..3492c561f2cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c @@ -82,7 +82,7 @@ g84_fifo_chan_engine_fini(struct nvkm_fifo_chan *base, if (offset < 0) return 0; - engn = fifo->base.func->engine_id(&fifo->base, engine); + engn = fifo->base.func->engine_id(&fifo->base, engine) - 1; save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn); nvkm_wr32(device, 0x0032fc, chan->base.inst->addr >> 12); done = nvkm_msec(device, 2000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c new file mode 100644 index 000000000000..c630dbd2911a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c @@ -0,0 +1,311 @@ +/* + * Copyright 2021 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine) +#define ga102_chan(p) container_of((p), struct ga102_chan, object) +#include <engine/fifo.h> +#include "user.h" + +#include <core/memory.h> +#include <subdev/mmu.h> +#include <subdev/timer.h> +#include <subdev/top.h> + +#include <nvif/cl0080.h> +#include <nvif/clc36f.h> +#include <nvif/class.h> + +struct ga102_fifo { + struct nvkm_fifo base; +}; + +struct ga102_chan { + struct nvkm_object object; + + struct { + u32 runl; + u32 chan; + } ctrl; + + struct nvkm_memory *mthd; + struct nvkm_memory *inst; + struct nvkm_memory *user; + struct nvkm_memory *runl; + + struct nvkm_vmm *vmm; +}; + +static int +ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass) +{ + if (index == 0) { + oclass->ctor = nvkm_object_new; + oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B }; + return 0; + } + + return -EINVAL; +} + +static int +ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc, + enum nvkm_object_map *type, u64 *addr, u64 *size) +{ + struct ga102_chan *chan = ga102_chan(object); + struct nvkm_device *device = chan->object.engine->subdev.device; + u64 bar2 = nvkm_memory_bar2(chan->user); + + if (bar2 == ~0ULL) + return -EFAULT; + + *type = NVKM_OBJECT_MAP_IO; + *addr = device->func->resource_addr(device, 3) + bar2; + *size = 0x1000; + return 0; +} + +static int +ga102_chan_fini(struct nvkm_object *object, bool suspend) +{ + struct ga102_chan *chan = ga102_chan(object); + struct nvkm_device *device = chan->object.engine->subdev.device; + + nvkm_wr32(device, chan->ctrl.chan, 0x00000003); + + nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000)) + break; + ); + + nvkm_wr32(device, chan->ctrl.runl + 0x088, 0); + + nvkm_wr32(device, chan->ctrl.chan, 0xffffffff); + return 0; +} + +static int +ga102_chan_init(struct nvkm_object *object) +{ + struct ga102_chan *chan = ga102_chan(object); + struct nvkm_device *device = chan->object.engine->subdev.device; + + nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000); + + nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl))); + nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl))); + nvkm_wr32(device, chan->ctrl.runl + 0x088, 2); + + nvkm_wr32(device, chan->ctrl.chan, 0x00000002); + nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0); + return 0; +} + +static void * +ga102_chan_dtor(struct nvkm_object *object) +{ + struct ga102_chan *chan = ga102_chan(object); + + if (chan->vmm) { + nvkm_vmm_part(chan->vmm, chan->inst); + nvkm_vmm_unref(&chan->vmm); + } + + nvkm_memory_unref(&chan->runl); + nvkm_memory_unref(&chan->user); + nvkm_memory_unref(&chan->inst); + nvkm_memory_unref(&chan->mthd); + return chan; +} + +static const struct nvkm_object_func +ga102_chan = { + .dtor = ga102_chan_dtor, + .init = ga102_chan_init, + .fini = ga102_chan_fini, + .map = ga102_chan_map, + .sclass = ga102_chan_sclass, +}; + +static int +ga102_chan_new(struct nvkm_device *device, + const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject) +{ + struct volta_channel_gpfifo_a_v0 *args = argv; + struct nvkm_top_device *tdev; + struct nvkm_vmm *vmm; + struct ga102_chan *chan; + int ret; + + if (argc != sizeof(*args)) + return -ENOSYS; + + vmm = nvkm_uvmm_search(oclass->client, args->vmm); + if (IS_ERR(vmm)) + return PTR_ERR(vmm); + + if (!(chan = 
kzalloc(sizeof(*chan), GFP_KERNEL))) + return -ENOMEM; + + nvkm_object_ctor(&ga102_chan, oclass, &chan->object); + *pobject = &chan->object; + + list_for_each_entry(tdev, &device->top->device, head) { + if (tdev->type == NVKM_ENGINE_CE) { + chan->ctrl.runl = tdev->runlist; + break; + } + } + + if (!chan->ctrl.runl) + return -ENODEV; + + chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0; + + args->chid = 0; + args->inst = 0; + args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000; + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd); + if (ret) + return ret; + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst); + if (ret) + return ret; + + nvkm_kmap(chan->inst); + nvkm_wo32(chan->inst, 0x010, 0x0000face); + nvkm_wo32(chan->inst, 0x030, 0x7ffff902); + nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset)); + nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) | + (order_base_2(args->ilength / 8) << 16)); + nvkm_wo32(chan->inst, 0x084, 0x20400000); + nvkm_wo32(chan->inst, 0x094, 0x30000001); + nvkm_wo32(chan->inst, 0x0ac, 0x00020000); + nvkm_wo32(chan->inst, 0x0e4, 0x00000000); + nvkm_wo32(chan->inst, 0x0e8, 0); + nvkm_wo32(chan->inst, 0x0f4, 0x00001000); + nvkm_wo32(chan->inst, 0x0f8, 0x10003080); + nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000); + nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd))); + nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd))); + nvkm_done(chan->inst); + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user); + if (ret) + return ret; + + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl); + if (ret) + return ret; + + nvkm_kmap(chan->runl); + nvkm_wo32(chan->runl, 0x00, 0x80030001); + nvkm_wo32(chan->runl, 0x04, 1); + nvkm_wo32(chan->runl, 0x08, 0); + nvkm_wo32(chan->runl, 0x0c, 0x00000000); + nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user))); + nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user))); + nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst))); + nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst))); + nvkm_done(chan->runl); + + ret = nvkm_vmm_join(vmm, chan->inst); + if (ret) + return ret; + + chan->vmm = nvkm_vmm_ref(vmm); + return 0; +} + +static const struct nvkm_device_oclass +ga102_chan_oclass = { + .ctor = ga102_chan_new, +}; + +static int +ga102_user_new(struct nvkm_device *device, + const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject) +{ + return tu102_fifo_user_new(oclass, argv, argc, pobject); +} + +static const struct nvkm_device_oclass +ga102_user_oclass = { + .ctor = ga102_user_new, +}; + +static int +ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class) +{ + if (index == 0) { + oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A }; + *class = &ga102_user_oclass; + return 0; + } else + if (index == 1) { + oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B }; + *class = &ga102_chan_oclass; + return 0; + } + + return 2; +} + +static int +ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data) +{ + switch (mthd) { + case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0; + default: + break; + } + + return -ENOSYS; +} + +static void * +ga102_fifo_dtor(struct nvkm_engine *engine) +{ + return ga102_fifo(engine); +} + 
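Between the destructor above and the nvkm_engine_func table below, note the object pattern this new file relies on: the driver structure embeds its nvkm base object, downcasts with container_of() (the ga102_fifo()/ga102_chan() macros near the top of the file), and the ->dtor hook returns the outer allocation for the core to free. A reduced, illustrative sketch of that shape, not a buildable driver on its own:

struct example_fifo {
	struct nvkm_fifo base;	/* embedded base; its .engine is what nvkm handles */
	/* driver-private state would follow */
};

#define example_fifo(p) container_of((p), struct example_fifo, base.engine)

static void *
example_fifo_dtor(struct nvkm_engine *engine)
{
	/* The returned pointer is what the core kfree()s, as in ga102_fifo_dtor(). */
	return example_fifo(engine);
}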
+static const struct nvkm_engine_func +ga102_fifo = { + .dtor = ga102_fifo_dtor, + .info = ga102_fifo_info, + .base.sclass = ga102_fifo_sclass, +}; + +int +ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, + struct nvkm_fifo **pfifo) +{ + struct ga102_fifo *fifo; + + if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL))) + return -ENOMEM; + + nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine); + *pfifo = &fifo->base; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c index e417044cc347..260b197f81bc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c @@ -49,7 +49,7 @@ tu102_fifo_runlist_commit(struct gk104_fifo *fifo, int runl, /*XXX: how to wait? can you even wait? */ } -const struct gk104_fifo_runlist_func +static const struct gk104_fifo_runlist_func tu102_fifo_runlist = { .size = 16, .cgrp = gv100_fifo_runlist_cgrp, diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index 262641a014b0..c91130a6be2a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -117,8 +117,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon) int nvkm_falcon_reset(struct nvkm_falcon *falcon) { - nvkm_falcon_disable(falcon); - return nvkm_falcon_enable(falcon); + if (!falcon->func->reset) { + nvkm_falcon_disable(falcon); + return nvkm_falcon_enable(falcon); + } + + return falcon->func->reset(falcon); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c index cdb1ead26d84..82b4c8e1457c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -207,11 +207,13 @@ int gm200_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { wpr_header_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c index fb9132a39bb1..fd97a935a380 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -161,11 +161,13 @@ int gp102_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) { wpr_header_v1_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index 9de74f41dcd2..142079403864 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c @@ -401,7 +401,7 @@ init_table_(struct nvbios_init *init, u16 offset, const char *name) #define init_macro_table(b) init_table_((b), 0x04, "macro table") #define init_condition_table(b) init_table_((b), 0x06, "condition table") #define 
init_io_condition_table(b) init_table_((b), 0x08, "io condition table") -#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag conditon table") +#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag condition table") #define init_function_table(b) init_table_((b), 0x0c, "function table") #define init_xlat_table(b) init_table_((b), 0x10, "xlat table"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c index fb90d47e1225..a9cdf2411187 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c @@ -32,7 +32,6 @@ mcp89_devinit_disable(struct nvkm_devinit *init) struct nvkm_device *device = init->subdev.device; u32 r001540 = nvkm_rd32(device, 0x001540); u32 r00154c = nvkm_rd32(device, 0x00154c); - u64 disable = 0; if (!(r001540 & 0x40000000)) { nvkm_subdev_disable(device, NVKM_ENGINE_MSPDEC, 0); @@ -48,7 +47,7 @@ mcp89_devinit_disable(struct nvkm_devinit *init) if (!(r00154c & 0x00000200)) nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0); - return disable; + return 0; } static const struct nvkm_devinit_func diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index 24382875fb4f..455e95a89259 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -94,20 +94,13 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } -static int +static void nvkm_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; if (!pmu->func->enabled(pmu)) - return 0; - - /* Inhibit interrupts, and wait for idle. */ - nvkm_wr32(device, 0x10a014, 0x0000ffff); - nvkm_msec(device, 2000, - if (!nvkm_rd32(device, 0x10a04c)) - break; - ); + return; /* Reset. */ if (pmu->func->reset) @@ -118,25 +111,37 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu) if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006)) break; ); - - return 0; } static int nvkm_pmu_preinit(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - return nvkm_pmu_reset(pmu); + nvkm_pmu_reset(pmu); + return 0; } static int nvkm_pmu_init(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - int ret = nvkm_pmu_reset(pmu); - if (ret == 0 && pmu->func->init) - ret = pmu->func->init(pmu); - return ret; + struct nvkm_device *device = pmu->subdev.device; + + if (!pmu->func->init) + return 0; + + if (pmu->func->enabled(pmu)) { + /* Inhibit interrupts, and wait for idle. 
*/ + nvkm_wr32(device, 0x10a014, 0x0000ffff); + nvkm_msec(device, 2000, + if (!nvkm_rd32(device, 0x10a04c)) + break; + ); + + nvkm_pmu_reset(pmu); + } + + return pmu->func->init(pmu); } static void * diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c index 5968c7696596..40439e329aa9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c @@ -23,9 +23,38 @@ */ #include "priv.h" +static int +gm200_pmu_flcn_reset(struct nvkm_falcon *falcon) +{ + struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon); + + nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff); + pmu->func->reset(pmu); + return nvkm_falcon_enable(falcon); +} + +const struct nvkm_falcon_func +gm200_pmu_flcn = { + .debug = 0xc08, + .fbif = 0xe00, + .load_imem = nvkm_falcon_v1_load_imem, + .load_dmem = nvkm_falcon_v1_load_dmem, + .read_dmem = nvkm_falcon_v1_read_dmem, + .bind_context = nvkm_falcon_v1_bind_context, + .wait_for_halt = nvkm_falcon_v1_wait_for_halt, + .clear_interrupt = nvkm_falcon_v1_clear_interrupt, + .set_start_addr = nvkm_falcon_v1_set_start_addr, + .start = nvkm_falcon_v1_start, + .enable = nvkm_falcon_v1_enable, + .disable = nvkm_falcon_v1_disable, + .reset = gm200_pmu_flcn_reset, + .cmdq = { 0x4a0, 0x4b0, 4 }, + .msgq = { 0x4c8, 0x4cc, 0 }, +}; + static const struct nvkm_pmu_func gm200_pmu = { - .flcn = >215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .reset = gf100_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c index 148706977eec..e1772211b0a4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c @@ -211,7 +211,7 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gm20b_pmu = { - .flcn = >215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c index 00da1b873ce8..6bf7fc1bd1e3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c @@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu) static const struct nvkm_pmu_func gp102_pmu = { - .flcn = >215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gp102_pmu_enabled, .reset = gp102_pmu_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index 461f722656e2..ba1583bb618b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -78,7 +78,7 @@ gp10b_pmu_acr = { static const struct nvkm_pmu_func gp10b_pmu = { - .flcn = >215_pmu_flcn, + .flcn = &gm200_pmu_flcn, .enabled = gf100_pmu_enabled, .intr = gt215_pmu_intr, .recv = gm20b_pmu_recv, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index e7860d177353..bcaade758ff7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -44,6 +44,8 @@ void gf100_pmu_reset(struct nvkm_pmu *); void gk110_pmu_pgob(struct nvkm_pmu *, bool); +extern const struct nvkm_falcon_func gm200_pmu_flcn; + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); int gm20b_pmu_acr_boot(struct 
nvkm_falcon *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c index 31933f3e5a07..c982d834c8d9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c @@ -54,7 +54,7 @@ ga100_top_oneinit(struct nvkm_top *top) info->reset = (data & 0x0000001f); break; case 2: - info->runlist = (data & 0x0000fc00) >> 10; + info->runlist = (data & 0x00fffc00); info->engine = (data & 0x00000003); break; default: @@ -85,9 +85,10 @@ ga100_top_oneinit(struct nvkm_top *top) } nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d " - "runlist %2d engine %2d reset %2d\n", type, inst, + "runlist %6x engine %2d reset %2d\n", type, inst, info->type == NVKM_SUBDEV_NR ? "????????" : nvkm_subdev_type[info->type], - info->addr, info->fault, info->runlist, info->engine, info->reset); + info->addr, info->fault, info->runlist < 0 ? 0 : info->runlist, + info->engine, info->reset); info = NULL; } diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index d6e4df291d6f..455e1a91f0e5 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_OMAP tristate "OMAP DRM" - depends on DRM + depends on DRM && OF depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM select DRM_KMS_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile index 21e8277ff88f..710b4e0abcf0 100644 --- a/drivers/gpu/drm/omapdrm/Makefile +++ b/drivers/gpu/drm/omapdrm/Makefile @@ -9,6 +9,7 @@ omapdrm-y := omap_drv.o \ omap_debugfs.o \ omap_crtc.o \ omap_plane.o \ + omap_overlay.o \ omap_encoder.o \ omap_fb.o \ omap_gem.o \ diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 5619420cc2cc..c4de142cc85b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -92,6 +92,8 @@ struct dispc_features { u8 mgr_height_start; u16 mgr_width_max; u16 mgr_height_max; + u16 ovl_width_max; + u16 ovl_height_max; unsigned long max_lcd_pclk; unsigned long max_tv_pclk; unsigned int max_downscale; @@ -1279,8 +1281,8 @@ static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc, return dispc->feat->burst_size_unit * 8; } -static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, - enum omap_plane_id plane, u32 fourcc) +bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, + enum omap_plane_id plane, u32 fourcc) { const u32 *modes; unsigned int i; @@ -2487,6 +2489,11 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc, return 0; } +enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane) +{ + return dispc->feat->overlay_caps[plane]; +} + #define DIV_FRAC(dividend, divisor) \ ((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100)) @@ -2599,6 +2606,12 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc, return 0; } +void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height) +{ + *width = dispc->feat->ovl_width_max; + *height = dispc->feat->ovl_height_max; +} + static int dispc_ovl_setup_common(struct dispc_device *dispc, enum omap_plane_id plane, enum omap_overlay_caps caps, @@ -4240,6 +4253,8 @@ static const struct dispc_features omap24xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 66500000, 
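Earlier in this hunk, ga100_top_oneinit() stops right-shifting the runlist field: the 6-bit index ((data & 0x0000fc00) >> 10) becomes the wider, unshifted value (data & 0x00fffc00), which the debug message now prints as hex (apparently an offset rather than a small index). The two extractions side by side, for an arbitrary example register value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t data = 0x00ab8401;                 /* example value */
        uint32_t old = (data & 0x0000fc00) >> 10;   /* shifted index */
        uint32_t new = data & 0x00fffc00;           /* raw, unshifted */

        printf("old=%u new=0x%06x\n", old, new);
        return 0;
    }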
.max_downscale = 2, /* @@ -4278,6 +4293,8 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4313,6 +4330,8 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4348,6 +4367,8 @@ static const struct dispc_features omap36xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4383,6 +4404,8 @@ static const struct dispc_features am43xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 173000000, .max_tv_pclk = 59000000, .max_downscale = 4, @@ -4418,6 +4441,8 @@ static const struct dispc_features omap44xx_dispc_feats = { .mgr_height_start = 26, .mgr_width_max = 2048, .mgr_height_max = 2048, + .ovl_width_max = 2048, + .ovl_height_max = 2048, .max_lcd_pclk = 170000000, .max_tv_pclk = 185625000, .max_downscale = 4, @@ -4457,8 +4482,10 @@ static const struct dispc_features omap54xx_dispc_feats = { .mgr_height_start = 27, .mgr_width_max = 4096, .mgr_height_max = 4096, + .ovl_width_max = 2048, + .ovl_height_max = 4096, .max_lcd_pclk = 170000000, - .max_tv_pclk = 186000000, + .max_tv_pclk = 192000000, .max_downscale = 4, .max_line_width = 2048, .min_pcd = 1, @@ -4725,7 +4752,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) struct dispc_device *dispc; u32 rev; int r = 0; - struct resource *dispc_mem; struct device_node *np = pdev->dev.of_node; dispc = kzalloc(sizeof(*dispc), GFP_KERNEL); @@ -4750,8 +4776,7 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (r) goto err_free; - dispc_mem = platform_get_resource(dispc->pdev, IORESOURCE_MEM, 0); - dispc->base = devm_ioremap_resource(&pdev->dev, dispc_mem); + dispc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dispc->base)) { r = PTR_ERR(dispc->base); goto err_free; @@ -4844,7 +4869,7 @@ static int dispc_remove(struct platform_device *pdev) return 0; } -static int dispc_runtime_suspend(struct device *dev) +static __maybe_unused int dispc_runtime_suspend(struct device *dev) { struct dispc_device *dispc = dev_get_drvdata(dev); @@ -4859,7 +4884,7 @@ static int dispc_runtime_suspend(struct device *dev) return 0; } -static int dispc_runtime_resume(struct device *dev) +static __maybe_unused int dispc_runtime_resume(struct device *dev) { struct dispc_device *dispc = dev_get_drvdata(dev); @@ -4887,8 +4912,7 @@ static int dispc_runtime_resume(struct device *dev) } static const struct dev_pm_ops dispc_pm_ops = { - .runtime_suspend = dispc_runtime_suspend, - .runtime_resume = dispc_runtime_resume, + SET_RUNTIME_PM_OPS(dispc_runtime_suspend, dispc_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 5f1722b040f4..a6845856cbce 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -2094,7 +2094,7 @@ static int dsi_vc_send_long(struct 
dsi_data *dsi, int vc, u8 b1, b2, b3, b4; if (dsi->debug_write) - DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len); + DSSDBG("dsi_vc_send_long, %zu bytes\n", msg->tx_len); /* len + header */ if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) { @@ -2390,7 +2390,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc, return 0; err: - DSSERR("%s(vc %d, reqlen %d) failed\n", __func__, vc, msg->tx_len); + DSSERR("%s(vc %d, reqlen %zu) failed\n", __func__, vc, msg->tx_len); return r; } @@ -4884,7 +4884,6 @@ static int dsi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct dsi_data *dsi; struct resource *dsi_mem; - struct resource *res; unsigned int i; int r; @@ -4921,13 +4920,11 @@ static int dsi_probe(struct platform_device *pdev) if (IS_ERR(dsi->proto_base)) return PTR_ERR(dsi->proto_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - dsi->phy_base = devm_ioremap_resource(dev, res); + dsi->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(dsi->phy_base)) return PTR_ERR(dsi->phy_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); - dsi->pll_base = devm_ioremap_resource(dev, res); + dsi->pll_base = devm_platform_ioremap_resource_byname(pdev, "pll"); if (IS_ERR(dsi->pll_base)) return PTR_ERR(dsi->pll_base); @@ -5061,7 +5058,7 @@ static int dsi_remove(struct platform_device *pdev) return 0; } -static int dsi_runtime_suspend(struct device *dev) +static __maybe_unused int dsi_runtime_suspend(struct device *dev) { struct dsi_data *dsi = dev_get_drvdata(dev); @@ -5074,7 +5071,7 @@ static int dsi_runtime_suspend(struct device *dev) return 0; } -static int dsi_runtime_resume(struct device *dev) +static __maybe_unused int dsi_runtime_resume(struct device *dev) { struct dsi_data *dsi = dev_get_drvdata(dev); @@ -5086,8 +5083,7 @@ static int dsi_runtime_resume(struct device *dev) } static const struct dev_pm_ops dsi_pm_ops = { - .runtime_suspend = dsi_runtime_suspend, - .runtime_resume = dsi_runtime_resume, + SET_RUNTIME_PM_OPS(dsi_runtime_suspend, dsi_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index d6a5862b4dbf..69b3e15b9356 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1424,7 +1424,6 @@ static int dss_probe(struct platform_device *pdev) const struct soc_device_attribute *soc; struct dss_component_match_data cmatch; struct component_match *match = NULL; - struct resource *dss_mem; struct dss_device *dss; int r; @@ -1452,8 +1451,7 @@ static int dss_probe(struct platform_device *pdev) dss->feat = of_match_device(dss_of_match, &pdev->dev)->data; /* Map I/O registers, get and setup clocks. 
*/ - dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dss->base = devm_ioremap_resource(&pdev->dev, dss_mem); + dss->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dss->base)) { r = PTR_ERR(dss->base); goto err_free_dss; @@ -1571,7 +1569,7 @@ static void dss_shutdown(struct platform_device *pdev) DSSDBG("shutdown\n"); } -static int dss_runtime_suspend(struct device *dev) +static __maybe_unused int dss_runtime_suspend(struct device *dev) { struct dss_device *dss = dev_get_drvdata(dev); @@ -1583,7 +1581,7 @@ static int dss_runtime_suspend(struct device *dev) return 0; } -static int dss_runtime_resume(struct device *dev) +static __maybe_unused int dss_runtime_resume(struct device *dev) { struct dss_device *dss = dev_get_drvdata(dev); int r; @@ -1606,8 +1604,7 @@ static int dss_runtime_resume(struct device *dev) } static const struct dev_pm_ops dss_pm_ops = { - .runtime_suspend = dss_runtime_suspend, - .runtime_resume = dss_runtime_resume, + SET_RUNTIME_PM_OPS(dss_runtime_suspend, dss_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index a547527bb2f3..4ff02fbc0e71 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -397,6 +397,11 @@ int dispc_get_num_mgrs(struct dispc_device *dispc); const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc, enum omap_plane_id plane); +void dispc_ovl_get_max_size(struct dispc_device *dispc, u16 *width, u16 *height); +bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, + enum omap_plane_id plane, u32 fourcc); +enum omap_overlay_caps dispc_ovl_get_caps(struct dispc_device *dispc, enum omap_plane_id plane); + u32 dispc_read_irqstatus(struct dispc_device *dispc); void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask); void dispc_write_irqenable(struct dispc_device *dispc, u32 mask); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index 43592c1cf081..852987e67e40 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * HDMI CEC * @@ -10,19 +11,6 @@ * Heavily modified to use the linux CEC framework: * * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include <linux/kernel.h> diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h index 0292337c97cc..1ca5b5ca8a99 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.h @@ -1,20 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * HDMI header definition for OMAP4 HDMI CEC IP * * Copyright 2016-2017 Cisco Systems, Inc. 
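In the dispc, dsi, and dss hunks above, the open-coded .runtime_suspend/.runtime_resume assignments become SET_RUNTIME_PM_OPS(), and the callbacks gain __maybe_unused: the macro expands to nothing when CONFIG_PM is disabled, so the annotation keeps the then-unreferenced functions from tripping -Wunused-function. The shape of the pattern, for a hypothetical driver:

    static __maybe_unused int foo_runtime_suspend(struct device *dev)
    {
        /* stop clocks, save context ... */
        return 0;
    }

    static __maybe_unused int foo_runtime_resume(struct device *dev)
    {
        /* restore context, restart clocks ... */
        return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
        /* Populates the runtime callbacks only under CONFIG_PM. */
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };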
and/or its affiliates. All rights reserved. - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _HDMI4_CEC_H_ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 35faa7f028c4..8720bf4f18fe 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -870,7 +870,6 @@ static const struct soc_device_attribute hdmi4_soc_devices[] = { int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) { const struct hdmi4_features *features; - struct resource *res; const struct soc_device_attribute *soc; soc = soc_device_match(hdmi4_soc_devices); @@ -881,8 +880,7 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core) core->cts_swmode = features->cts_swmode; core->audio_use_mclk = features->audio_use_mclk; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); - core->base = devm_ioremap_resource(&pdev->dev, res); + core->base = devm_platform_ioremap_resource_byname(pdev, "core"); if (IS_ERR(core->base)) return PTR_ERR(core->base); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 6cc2ad7a420c..21564c38234f 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -872,10 +872,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core) { - struct resource *res; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); - core->base = devm_ioremap_resource(&pdev->dev, res); + core->base = devm_platform_ioremap_resource_byname(pdev, "core"); if (IS_ERR(core->base)) return PTR_ERR(core->base); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 5dc200f09c3c..060e8f76f2be 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -182,15 +182,12 @@ static const struct hdmi_phy_features omap54xx_phy_feats = { int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy, unsigned int version) { - struct resource *res; - if (version == 4) phy->features = &omap44xx_phy_feats; else phy->features = &omap54xx_phy_feats; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - phy->base = devm_ioremap_resource(&pdev->dev, res); + phy->base = devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(phy->base)) return PTR_ERR(phy->base); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index 13bf649aba52..eea719243eaf 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -162,13 +162,11 @@ int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev, struct hdmi_pll_data *pll, struct 
hdmi_wp_data *wp) { int r; - struct resource *res; pll->pdev = pdev; pll->wp = wp; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); - pll->base = devm_ioremap_resource(&pdev->dev, res); + pll->base = devm_platform_ioremap_resource_byname(pdev, "pll"); if (IS_ERR(pll->base)) return PTR_ERR(pll->base); diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index e522c17955d0..4480b69ab5a7 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -806,7 +806,6 @@ static const struct soc_device_attribute venc_soc_devices[] = { static int venc_probe(struct platform_device *pdev) { struct venc_device *venc; - struct resource *venc_mem; int r; venc = kzalloc(sizeof(*venc), GFP_KERNEL); @@ -823,8 +822,7 @@ static int venc_probe(struct platform_device *pdev) venc->config = &venc_config_pal_trm; - venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0); - venc->base = devm_ioremap_resource(&pdev->dev, venc_mem); + venc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(venc->base)) { r = PTR_ERR(venc->base); goto err_free; @@ -881,7 +879,7 @@ static int venc_remove(struct platform_device *pdev) return 0; } -static int venc_runtime_suspend(struct device *dev) +static __maybe_unused int venc_runtime_suspend(struct device *dev) { struct venc_device *venc = dev_get_drvdata(dev); @@ -891,7 +889,7 @@ static int venc_runtime_suspend(struct device *dev) return 0; } -static int venc_runtime_resume(struct device *dev) +static __maybe_unused int venc_runtime_resume(struct device *dev) { struct venc_device *venc = dev_get_drvdata(dev); @@ -902,8 +900,7 @@ static int venc_runtime_resume(struct device *dev) } static const struct dev_pm_ops venc_pm_ops = { - .runtime_suspend = venc_runtime_suspend, - .runtime_resume = venc_runtime_resume, + SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index b72c3ffddc9a..b6b52049f753 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -137,7 +137,6 @@ struct dss_pll *dss_video_pll_init(struct dss_device *dss, const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" }; const char * const clkin_name[] = { "video1_clk", "video2_clk" }; - struct resource *res; struct dss_video_pll *vpll; void __iomem *pll_base, *clkctrl_base; struct clk *clk; @@ -146,16 +145,13 @@ struct dss_pll *dss_video_pll_init(struct dss_device *dss, /* PLL CONTROL */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]); - pll_base = devm_ioremap_resource(&pdev->dev, res); + pll_base = devm_platform_ioremap_resource_byname(pdev, reg_name[id]); if (IS_ERR(pll_base)) return ERR_CAST(pll_base); /* CLOCK CONTROL */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - clkctrl_name[id]); - clkctrl_base = devm_ioremap_resource(&pdev->dev, res); + clkctrl_base = devm_platform_ioremap_resource_byname(pdev, clkctrl_name[id]); if (IS_ERR(clkctrl_base)) return ERR_CAST(clkctrl_base); diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h index 58a8239d3e69..2288ed56f23d 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ 
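These omapdrm conversions all follow one pattern: the two-step platform_get_resource()/platform_get_resource_byname() plus devm_ioremap_resource() pairing collapses into a single devm_platform_ioremap_resource() or devm_platform_ioremap_resource_byname() call, dropping the local struct resource. Sketched for a hypothetical probe:

    static int foo_probe(struct platform_device *pdev)
    {
        void __iomem *base, *phy;

        /* By index: was platform_get_resource(pdev, IORESOURCE_MEM, 0)
         * followed by devm_ioremap_resource(&pdev->dev, res). */
        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
            return PTR_ERR(base);

        /* By name: was platform_get_resource_byname(..., "phy"). */
        phy = devm_platform_ioremap_resource_byname(pdev, "phy");
        if (IS_ERR(phy))
            return PTR_ERR(phy);

        return 0;
    }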
* Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef OMAP_DMM_PRIV_H diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index ed770caf55c2..852e78a5f142 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * DMM IOMMU driver support functions for TI OMAP processors. * * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/completion.h> diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h index 2f8918fe06d5..87a32b3cd3b0 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * Author: Rob Clark <rob@ti.com> * Andy Gross <andy.gross@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef OMAP_DMM_TILER_H #define OMAP_DMM_TILER_H diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index f86e20578143..2720a58ccd90 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -117,6 +117,102 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) dispc_runtime_put(priv->dispc); } +static int drm_atomic_state_normalized_zpos_cmp(const void *a, const void *b) +{ + const struct drm_plane_state *sa = *(struct drm_plane_state **)a; + const struct drm_plane_state *sb = *(struct drm_plane_state **)b; + + if (sa->normalized_zpos != sb->normalized_zpos) + return sa->normalized_zpos - sb->normalized_zpos; + else + return sa->plane->base.id - sb->plane->base.id; +} + +/* + * This replaces the drm_atomic_normalize_zpos to handle the dual overlay case. + * + * Since both halves need to be 'appear' side by side the zpos is + * recalculated when dealing with dual overlay cases so that the other + * planes zpos is consistent. 
+ */ +static int omap_atomic_update_normalize_zpos(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_state, *new_state; + struct drm_plane *plane; + int c, i, n, inc; + int total_planes = dev->mode_config.num_total_plane; + struct drm_plane_state **states; + int ret = 0; + + states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL); + if (!states) + return -ENOMEM; + + for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, c) { + if (old_state->plane_mask == new_state->plane_mask && + !new_state->zpos_changed) + continue; + + /* Reset plane increment and index value for every crtc */ + n = 0; + + /* + * Normalization process might create new states for planes + * which normalized_zpos has to be recalculated. + */ + drm_for_each_plane_mask(plane, dev, new_state->plane_mask) { + struct drm_plane_state *plane_state = + drm_atomic_get_plane_state(new_state->state, + plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto done; + } + states[n++] = plane_state; + } + + sort(states, n, sizeof(*states), + drm_atomic_state_normalized_zpos_cmp, NULL); + + for (i = 0, inc = 0; i < n; i++) { + plane = states[i]->plane; + + states[i]->normalized_zpos = i + inc; + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] updated normalized zpos value %d\n", + plane->base.id, plane->name, + states[i]->normalized_zpos); + + if (is_omap_plane_dual_overlay(states[i])) + inc++; + } + new_state->zpos_changed = true; + } + +done: + kfree(states); + return ret; +} + +static int omap_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ret; + + ret = drm_atomic_helper_check(dev, state); + if (ret) + return ret; + + if (dev->mode_config.normalize_zpos) { + ret = omap_atomic_update_normalize_zpos(dev, state); + if (ret) + return ret; + } + + return 0; +} + static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = { .atomic_commit_tail = omap_atomic_commit_tail, }; @@ -124,10 +220,86 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = static const struct drm_mode_config_funcs omap_mode_config_funcs = { .fb_create = omap_framebuffer_create, .output_poll_changed = drm_fb_helper_output_poll_changed, - .atomic_check = drm_atomic_helper_check, + .atomic_check = omap_atomic_check, .atomic_commit = drm_atomic_helper_commit, }; +/* Global/shared object state funcs */ + +/* + * This is a helper that returns the private state currently in operation. + * Note that this would return the "old_state" if called in the atomic check + * path, and the "new_state" after the atomic swap has been done. + */ +struct omap_global_state * +omap_get_existing_global_state(struct omap_drm_private *priv) +{ + return to_omap_global_state(priv->glob_obj.state); +} + +/* + * This acquires the modeset lock set aside for global state, creates + * a new duplicated private object state. 
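omap_atomic_update_normalize_zpos() above sorts the relevant plane states by zpos, then hands out consecutive normalized values while skipping one extra slot after each dual-overlay plane, so the plane's right half can occupy the next zorder. The renumbering step in isolation (a plain array stands in for the sorted states; dual[] marks planes that need two overlays):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        bool dual[] = { false, true, false };  /* plane 1 uses two overlays */
        int n = 3, inc = 0;

        for (int i = 0; i < n; i++) {
            printf("plane %d -> normalized zpos %d\n", i, i + inc);
            if (dual[i])
                inc++;      /* reserve the next zorder for the right half */
        }
        /* prints 0, 1, 3: zorder 2 stays free for plane 1's right overlay */
        return 0;
    }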
+ */ +struct omap_global_state *__must_check +omap_get_global_state(struct drm_atomic_state *s) +{ + struct omap_drm_private *priv = s->dev->dev_private; + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_private_obj_state(s, &priv->glob_obj); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_omap_global_state(priv_state); +} + +static struct drm_private_state * +omap_global_duplicate_state(struct drm_private_obj *obj) +{ + struct omap_global_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void omap_global_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct omap_global_state *omap_state = to_omap_global_state(state); + + kfree(omap_state); +} + +static const struct drm_private_state_funcs omap_global_state_funcs = { + .atomic_duplicate_state = omap_global_duplicate_state, + .atomic_destroy_state = omap_global_destroy_state, +}; + +static int omap_global_obj_init(struct drm_device *dev) +{ + struct omap_drm_private *priv = dev->dev_private; + struct omap_global_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(dev, &priv->glob_obj, &state->base, + &omap_global_state_funcs); + return 0; +} + +static void omap_global_obj_fini(struct omap_drm_private *priv) +{ + drm_atomic_private_obj_fini(&priv->glob_obj); +} + static void omap_disconnect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; @@ -231,8 +403,6 @@ static int omap_modeset_init(struct drm_device *dev) if (!omapdss_stack_is_ready()) return -EPROBE_DEFER; - drm_mode_config_init(dev); - ret = omap_modeset_init_properties(dev); if (ret < 0) return ret; @@ -572,7 +742,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) priv->dss->mgr_ops_priv = priv; soc = soc_device_match(omapdrm_soc_devices); - priv->omaprev = soc ? (unsigned int)soc->data : 0; + priv->omaprev = soc ? (uintptr_t)soc->data : 0; priv->wq = alloc_ordered_workqueue("omapdrm", 0); mutex_init(&priv->list_lock); @@ -583,10 +753,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_gem_init(ddev); + drm_mode_config_init(ddev); + + ret = omap_global_obj_init(ddev); + if (ret) + goto err_gem_deinit; + + ret = omap_hwoverlays_init(priv); + if (ret) + goto err_free_priv_obj; + ret = omap_modeset_init(ddev); if (ret) { dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret); - goto err_gem_deinit; + goto err_free_overlays; } /* Initialize vblank handling, start with all CRTCs disabled. 
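The glob_obj code above is the stock drm_private_obj recipe for state shared across planes and CRTCs: a driver struct embedding drm_private_state first, a duplicate hook that kmemdup()s the whole object, and accessors that fetch the per-transaction copy through drm_atomic_get_private_obj_state(). A sketch of the consuming side, assuming the helpers defined above (example_claim_slot is hypothetical):

    /* Called from an atomic_check path with a drm_atomic_state in hand. */
    static int example_claim_slot(struct drm_atomic_state *s,
                                  struct drm_plane *plane)
    {
        struct omap_global_state *gstate = omap_get_global_state(s);

        if (IS_ERR(gstate))
            return PTR_ERR(gstate);

        /*
         * gstate is this transaction's private duplicate: mutating it is
         * safe, and it only becomes current when the commit swaps states,
         * so a failed check rolls back automatically.
         */
        gstate->hwoverlay_to_plane[0] = plane;
        return 0;
    }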
*/ @@ -618,7 +798,12 @@ err_cleanup_helpers: omap_fbdev_fini(ddev); err_cleanup_modeset: omap_modeset_fini(ddev); +err_free_overlays: + omap_hwoverlays_destroy(priv); +err_free_priv_obj: + omap_global_obj_fini(priv); err_gem_deinit: + drm_mode_config_cleanup(ddev); omap_gem_deinit(ddev); destroy_workqueue(priv->wq); omap_disconnect_pipelines(ddev); @@ -642,6 +827,9 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) drm_atomic_helper_shutdown(ddev); omap_modeset_fini(ddev); + omap_hwoverlays_destroy(priv); + omap_global_obj_fini(priv); + drm_mode_config_cleanup(ddev); omap_gem_deinit(ddev); destroy_workqueue(priv->wq); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 591d4c273f02..825960fd3ea9 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -14,6 +14,7 @@ #include "dss/omapdss.h" #include "dss/dss.h" +#include <drm/drm_atomic.h> #include <drm/drm_gem.h> #include <drm/omap_drm.h> @@ -24,6 +25,7 @@ #include "omap_gem.h" #include "omap_irq.h" #include "omap_plane.h" +#include "omap_overlay.h" #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* verbose debug */ @@ -40,6 +42,19 @@ struct omap_drm_pipeline { unsigned int alias_id; }; +/* + * Global private object state for tracking resources that are shared across + * multiple kms objects (planes/crtcs/etc). + */ +#define to_omap_global_state(x) container_of(x, struct omap_global_state, base) + +struct omap_global_state { + struct drm_private_state base; + + /* global atomic state of assignment between overlays and planes */ + struct drm_plane *hwoverlay_to_plane[8]; +}; + struct omap_drm_private { struct drm_device *ddev; struct device *dev; @@ -57,6 +72,11 @@ struct omap_drm_private { unsigned int num_planes; struct drm_plane *planes[8]; + unsigned int num_ovls; + struct omap_hw_overlay *overlays[8]; + + struct drm_private_obj glob_obj; + struct drm_fb_helper *fbdev; struct workqueue_struct *wq; @@ -85,4 +105,8 @@ struct omap_drm_private { void omap_debugfs_init(struct drm_minor *minor); +struct omap_global_state * __must_check omap_get_global_state(struct drm_atomic_state *s); + +struct omap_global_state *omap_get_existing_global_state(struct omap_drm_private *priv); + #endif /* __OMAPDRM_DRV_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 190afc564914..895e66b08a81 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -131,7 +131,9 @@ static u32 drm_rotation_to_tiler(unsigned int drm_rot) /* update ovl info for scanout, handles cases of multi-planar fb's, etc. 
*/ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, - struct drm_plane_state *state, struct omap_overlay_info *info) + struct drm_plane_state *state, + struct omap_overlay_info *info, + struct omap_overlay_info *r_info) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); const struct drm_format_info *format = omap_fb->format; @@ -218,6 +220,35 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, } else { info->p_uv_addr = 0; } + + if (r_info) { + info->width /= 2; + info->out_width /= 2; + + *r_info = *info; + + if (fb->format->is_yuv) { + if (info->width & 1) { + info->width++; + r_info->width--; + } + + if (info->out_width & 1) { + info->out_width++; + r_info->out_width--; + } + } + + r_info->pos_x = info->pos_x + info->out_width; + + r_info->paddr = get_linear_addr(fb, format, 0, + x + info->width, y); + if (fb->format->format == DRM_FORMAT_NV12) { + r_info->p_uv_addr = + get_linear_addr(fb, format, 1, + x + info->width, y); + } + } } /* pin, prepare for scanout: */ diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h index c0e19aed8220..b75f0b5ef1d8 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.h +++ b/drivers/gpu/drm/omapdrm/omap_fb.h @@ -26,7 +26,9 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, - struct drm_plane_state *state, struct omap_overlay_info *info); + struct drm_plane_state *state, + struct omap_overlay_info *info, + struct omap_overlay_info *r_info); bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb); void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 38af6195d959..b0fa17409b66 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -789,7 +789,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) if (omap_obj->flags & OMAP_BO_TILED_MASK) { block = tiler_reserve_2d(fmt, omap_obj->width, - omap_obj->height, 0); + omap_obj->height, PAGE_SIZE); } else { block = tiler_reserve_1d(obj->size); } @@ -851,6 +851,11 @@ static void omap_gem_unpin_locked(struct drm_gem_object *obj) return; if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { + if (omap_obj->sgt) { + sg_free_table(omap_obj->sgt); + kfree(omap_obj->sgt); + omap_obj->sgt = NULL; + } ret = tiler_unpin(omap_obj->block); if (ret) { dev_err(obj->dev->dev, @@ -963,6 +968,78 @@ int omap_gem_put_pages(struct drm_gem_object *obj) return 0; } +struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + dma_addr_t addr; + struct sg_table *sgt; + struct scatterlist *sg; + unsigned int count, len, stride, i; + int ret; + + ret = omap_gem_pin(obj, &addr); + if (ret) + return ERR_PTR(ret); + + mutex_lock(&omap_obj->lock); + + sgt = omap_obj->sgt; + if (sgt) + goto out; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + ret = -ENOMEM; + goto err_unpin; + } + + if (omap_obj->flags & OMAP_BO_TILED_MASK) { + enum tiler_fmt fmt = gem2fmt(omap_obj->flags); + + len = omap_obj->width << (int)fmt; + count = omap_obj->height; + stride = tiler_stride(fmt, 0); + } else { + len = obj->size; + count = 1; + stride = 0; + } + + ret = sg_alloc_table(sgt, count, GFP_KERNEL); + if (ret) + goto err_free; + + for_each_sg(sgt->sgl, 
sg, count, i) { + sg_set_page(sg, phys_to_page(addr), len, offset_in_page(addr)); + sg_dma_address(sg) = addr; + sg_dma_len(sg) = len; + + addr += stride; + } + + omap_obj->sgt = sgt; +out: + mutex_unlock(&omap_obj->lock); + return sgt; + +err_free: + kfree(sgt); +err_unpin: + mutex_unlock(&omap_obj->lock); + omap_gem_unpin(obj); + return ERR_PTR(ret); +} + +void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + + if (WARN_ON(omap_obj->sgt != sgt)) + return; + + omap_gem_unpin(obj); +} + #ifdef CONFIG_DRM_FBDEV_EMULATION /* * Get kernel virtual address for CPU access.. this more or less only diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h index eda9b4839c30..19209e319663 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.h +++ b/drivers/gpu/drm/omapdrm/omap_gem.h @@ -82,5 +82,7 @@ u32 omap_gem_flags(struct drm_gem_object *obj); int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, int x, int y, dma_addr_t *dma_addr); int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient); +struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj); +void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt); #endif /* __OMAPDRM_GEM_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index f4cde3a169d8..57af3d97be77 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -11,6 +11,8 @@ #include "omap_drv.h" +MODULE_IMPORT_NS(DMA_BUF); + /* ----------------------------------------------------------------------------- * DMABUF Export */ @@ -21,45 +23,21 @@ static struct sg_table *omap_gem_map_dma_buf( { struct drm_gem_object *obj = attachment->dmabuf->priv; struct sg_table *sg; - dma_addr_t dma_addr; - int ret; - - sg = kzalloc(sizeof(*sg), GFP_KERNEL); - if (!sg) - return ERR_PTR(-ENOMEM); - - /* camera, etc, need physically contiguous.. but we need a - * better way to know this.. 
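omap_gem_get_sg() above builds the scatterlist differently for the two layouts: a linear buffer is one segment covering obj->size, while a TILER buffer gets one segment per row (count = height rows of len bytes, placed stride apart in TILER address space). The addressing reduced to plain arithmetic, with made-up example numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t paddr      = 0x80000000;  /* pinned base address (example) */
        unsigned int len    = 2048;        /* bytes per row */
        unsigned int stride = 16384;       /* TILER row pitch */
        unsigned int count  = 4;           /* rows, i.e. buffer height */

        /* Segment i covers [paddr + i * stride, paddr + i * stride + len). */
        for (unsigned int i = 0; i < count; i++)
            printf("seg %u: 0x%llx, %u bytes\n", i,
                   (unsigned long long)(paddr + (uint64_t)i * stride), len);
        return 0;
    }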
- */ - ret = omap_gem_pin(obj, &dma_addr); - if (ret) - goto out; - - ret = sg_alloc_table(sg, 1, GFP_KERNEL); - if (ret) - goto out; - - sg_init_table(sg->sgl, 1); - sg_dma_len(sg->sgl) = obj->size; - sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0); - sg_dma_address(sg->sgl) = dma_addr; + sg = omap_gem_get_sg(obj); + if (IS_ERR(sg)) + return sg; /* this must be after omap_gem_pin() to ensure we have pages attached */ omap_gem_dma_sync_buffer(obj, dir); return sg; -out: - kfree(sg); - return ERR_PTR(ret); } static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) { struct drm_gem_object *obj = attachment->dmabuf->priv; - omap_gem_unpin(obj); - sg_free_table(sg); - kfree(sg); + omap_gem_put_sg(obj, sg); } static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, @@ -112,7 +90,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags) DEFINE_DMA_BUF_EXPORT_INFO(exp_info); exp_info.ops = &omap_dmabuf_ops; - exp_info.size = obj->size; + exp_info.size = omap_gem_mmap_size(obj); exp_info.flags = flags; exp_info.priv = obj; diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c new file mode 100644 index 000000000000..10730c9b2752 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_overlay.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Benoit Parrot <bparrot@ti.com> + */ + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_plane_helper.h> + +#include "omap_dmm_tiler.h" +#include "omap_drv.h" + +/* + * overlay funcs + */ +static const char * const overlay_id_to_name[] = { + [OMAP_DSS_GFX] = "gfx", + [OMAP_DSS_VIDEO1] = "vid1", + [OMAP_DSS_VIDEO2] = "vid2", + [OMAP_DSS_VIDEO3] = "vid3", +}; + +/* + * Find a free overlay with the required caps and supported fourcc + */ +static struct omap_hw_overlay * +omap_plane_find_free_overlay(struct drm_device *dev, struct drm_plane *hwoverlay_to_plane[], + u32 caps, u32 fourcc) +{ + struct omap_drm_private *priv = dev->dev_private; + int i; + + DBG("caps: %x fourcc: %x", caps, fourcc); + + for (i = 0; i < priv->num_ovls; i++) { + struct omap_hw_overlay *cur = priv->overlays[i]; + + DBG("%d: id: %d cur->caps: %x", + cur->idx, cur->id, cur->caps); + + /* skip if already in-use */ + if (hwoverlay_to_plane[cur->idx]) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + /* check supported format */ + if (!dispc_ovl_color_mode_supported(priv->dispc, + cur->id, fourcc)) + continue; + + return cur; + } + + DBG("no match"); + return NULL; +} + +/* + * Assign a new overlay to a plane with the required caps and supported fourcc + * If a plane need a new overlay, the previous one should have been released + * with omap_overlay_release() + * This should be called from the plane atomic_check() in order to prepare the + * next global overlay_map to be enabled when atomic transaction is valid. 
+ */ +int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane, + u32 caps, u32 fourcc, struct omap_hw_overlay **overlay, + struct omap_hw_overlay **r_overlay) +{ + /* Get the global state of the current atomic transaction */ + struct omap_global_state *state = omap_get_global_state(s); + struct drm_plane **overlay_map = state->hwoverlay_to_plane; + struct omap_hw_overlay *ovl, *r_ovl; + + ovl = omap_plane_find_free_overlay(s->dev, overlay_map, caps, fourcc); + if (!ovl) + return -ENOMEM; + + overlay_map[ovl->idx] = plane; + *overlay = ovl; + + if (r_overlay) { + r_ovl = omap_plane_find_free_overlay(s->dev, overlay_map, + caps, fourcc); + if (!r_ovl) { + overlay_map[r_ovl->idx] = NULL; + *overlay = NULL; + return -ENOMEM; + } + + overlay_map[r_ovl->idx] = plane; + *r_overlay = r_ovl; + } + + DBG("%s: assign to plane %s caps %x", ovl->name, plane->name, caps); + + if (r_overlay) { + DBG("%s: assign to right of plane %s caps %x", + r_ovl->name, plane->name, caps); + } + + return 0; +} + +/* + * Release an overlay from a plane if the plane gets not visible or the plane + * need a new overlay if overlay caps changes. + * This should be called from the plane atomic_check() in order to prepare the + * next global overlay_map to be enabled when atomic transaction is valid. + */ +void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay) +{ + /* Get the global state of the current atomic transaction */ + struct omap_global_state *state = omap_get_global_state(s); + struct drm_plane **overlay_map = state->hwoverlay_to_plane; + + if (!overlay) + return; + + if (WARN_ON(!overlay_map[overlay->idx])) + return; + + DBG("%s: release from plane %s", overlay->name, overlay_map[overlay->idx]->name); + + overlay_map[overlay->idx] = NULL; +} + +/* + * Update an overlay state that was attached to a plane before the current atomic state. + * This should be called from the plane atomic_update() or atomic_disable(), + * where an overlay association to a plane could have changed between the old and current + * atomic state. 
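omap_overlay_assign() and omap_overlay_release() above only touch the duplicated global map, so a failed atomic check discards the changes for free. One detail worth flagging: in the dual-overlay error path the code writes overlay_map[r_ovl->idx] immediately after establishing that r_ovl is NULL; rolling back the first assignment via ovl->idx looks like what was intended. A minimal model of the assign/rollback logic, with a plain int array standing in for the overlay map:

    #include <stdio.h>

    #define NUM_OVLS 4

    static int find_free(const int *map)
    {
        for (int i = 0; i < NUM_OVLS; i++)
            if (!map[i])
                return i;
        return -1;
    }

    static int assign(int *map, int plane, int need_right)
    {
        int ovl = find_free(map);

        if (ovl < 0)
            return -1;
        map[ovl] = plane;

        if (need_right) {
            int r_ovl = find_free(map);

            if (r_ovl < 0) {
                map[ovl] = 0;   /* roll back the slot we did claim */
                return -1;
            }
            map[r_ovl] = plane;
        }
        return 0;
    }

    int main(void)
    {
        int map[NUM_OVLS] = { 0 };

        printf("%d\n", assign(map, 1, 1));   /* 0: claimed two overlays */
        return 0;
    }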
+ */ +void omap_overlay_update_state(struct omap_drm_private *priv, + struct omap_hw_overlay *overlay) +{ + struct omap_global_state *state = omap_get_existing_global_state(priv); + struct drm_plane **overlay_map = state->hwoverlay_to_plane; + + /* Check if this overlay is not used anymore, then disable it */ + if (!overlay_map[overlay->idx]) { + DBG("%s: disabled", overlay->name); + + /* disable the overlay */ + dispc_ovl_enable(priv->dispc, overlay->id, false); + } +} + +static void omap_overlay_destroy(struct omap_hw_overlay *overlay) +{ + kfree(overlay); +} + +static struct omap_hw_overlay *omap_overlay_init(enum omap_plane_id overlay_id, + enum omap_overlay_caps caps) +{ + struct omap_hw_overlay *overlay; + + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); + if (!overlay) + return ERR_PTR(-ENOMEM); + + overlay->name = overlay_id_to_name[overlay_id]; + overlay->id = overlay_id; + overlay->caps = caps; + + return overlay; +} + +int omap_hwoverlays_init(struct omap_drm_private *priv) +{ + static const enum omap_plane_id hw_plane_ids[] = { + OMAP_DSS_GFX, OMAP_DSS_VIDEO1, + OMAP_DSS_VIDEO2, OMAP_DSS_VIDEO3, + }; + u32 num_overlays = dispc_get_num_ovls(priv->dispc); + enum omap_overlay_caps caps; + int i, ret; + + for (i = 0; i < num_overlays; i++) { + struct omap_hw_overlay *overlay; + + caps = dispc_ovl_get_caps(priv->dispc, hw_plane_ids[i]); + overlay = omap_overlay_init(hw_plane_ids[i], caps); + if (IS_ERR(overlay)) { + ret = PTR_ERR(overlay); + dev_err(priv->dev, "failed to construct overlay for %s (%d)\n", + overlay_id_to_name[i], ret); + omap_hwoverlays_destroy(priv); + return ret; + } + overlay->idx = priv->num_ovls; + priv->overlays[priv->num_ovls++] = overlay; + } + + return 0; +} + +void omap_hwoverlays_destroy(struct omap_drm_private *priv) +{ + int i; + + for (i = 0; i < priv->num_ovls; i++) { + omap_overlay_destroy(priv->overlays[i]); + priv->overlays[i] = NULL; + } + + priv->num_ovls = 0; +} diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.h b/drivers/gpu/drm/omapdrm/omap_overlay.h new file mode 100644 index 000000000000..e36a43f35563 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/omap_overlay.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Benoit Parrot <bparrot@ti.com> + */ + +#ifndef __OMAPDRM_OVERLAY_H__ +#define __OMAPDRM_OVERLAY_H__ + +#include <linux/types.h> + +enum drm_plane_type; + +struct drm_device; +struct drm_mode_object; +struct drm_plane; + +/* Used to associate a HW overlay/plane to a plane */ +struct omap_hw_overlay { + unsigned int idx; + + const char *name; + enum omap_plane_id id; + + enum omap_overlay_caps caps; +}; + +int omap_hwoverlays_init(struct omap_drm_private *priv); +void omap_hwoverlays_destroy(struct omap_drm_private *priv); +int omap_overlay_assign(struct drm_atomic_state *s, struct drm_plane *plane, + u32 caps, u32 fourcc, struct omap_hw_overlay **overlay, + struct omap_hw_overlay **r_overlay); +void omap_overlay_release(struct drm_atomic_state *s, struct omap_hw_overlay *overlay); +void omap_overlay_update_state(struct omap_drm_private *priv, struct omap_hw_overlay *overlay); +#endif /* __OMAPDRM_OVERLAY_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 512af976b7e9..b35205c4e979 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_gem_atomic_helper.h> #include 
<drm/drm_plane_helper.h> +#include <drm/drm_fourcc.h> #include "omap_dmm_tiler.h" #include "omap_drv.h" @@ -16,14 +17,30 @@ * plane funcs */ +#define to_omap_plane_state(x) container_of(x, struct omap_plane_state, base) + +struct omap_plane_state { + /* Must be first. */ + struct drm_plane_state base; + + struct omap_hw_overlay *overlay; + struct omap_hw_overlay *r_overlay; /* right overlay */ +}; + #define to_omap_plane(x) container_of(x, struct omap_plane, base) struct omap_plane { struct drm_plane base; enum omap_plane_id id; - const char *name; }; +bool is_omap_plane_dual_overlay(struct drm_plane_state *state) +{ + struct omap_plane_state *omap_state = to_omap_plane_state(state); + + return !!omap_state->r_overlay; +} + static int omap_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { @@ -46,13 +63,35 @@ static void omap_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct omap_drm_private *priv = plane->dev->dev_private; - struct omap_plane *omap_plane = to_omap_plane(plane); struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); - struct omap_overlay_info info; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct omap_plane_state *new_omap_state; + struct omap_plane_state *old_omap_state; + struct omap_overlay_info info, r_info; + enum omap_plane_id ovl_id, r_ovl_id; int ret; + bool dual_ovl; + + new_omap_state = to_omap_plane_state(new_state); + old_omap_state = to_omap_plane_state(old_state); - DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc, + dual_ovl = is_omap_plane_dual_overlay(new_state); + + /* Cleanup previously held overlay if needed */ + if (old_omap_state->overlay) + omap_overlay_update_state(priv, old_omap_state->overlay); + if (old_omap_state->r_overlay) + omap_overlay_update_state(priv, old_omap_state->r_overlay); + + if (!new_omap_state->overlay) { + DBG("[PLANE:%d:%s] no overlay attached", plane->base.id, plane->name); + return; + } + + ovl_id = new_omap_state->overlay->id; + DBG("%s, crtc=%p fb=%p", plane->name, new_state->crtc, new_state->fb); memset(&info, 0, sizeof(info)); @@ -67,65 +106,155 @@ static void omap_plane_atomic_update(struct drm_plane *plane, info.color_encoding = new_state->color_encoding; info.color_range = new_state->color_range; + r_info = info; + /* update scanout: */ - omap_framebuffer_update_scanout(new_state->fb, new_state, &info); + omap_framebuffer_update_scanout(new_state->fb, new_state, &info, + dual_ovl ? 
&r_info : NULL); - DBG("%dx%d -> %dx%d (%d)", info.width, info.height, - info.out_width, info.out_height, - info.screen_width); + DBG("%s: %dx%d -> %dx%d (%d)", + new_omap_state->overlay->name, info.width, info.height, + info.out_width, info.out_height, info.screen_width); DBG("%d,%d %pad %pad", info.pos_x, info.pos_y, &info.paddr, &info.p_uv_addr); + if (dual_ovl) { + r_ovl_id = new_omap_state->r_overlay->id; + /* + * If the current plane uses 2 hw planes the very next + * zorder is used by the r_overlay so we just use the + * main overlay zorder + 1 + */ + r_info.zorder = info.zorder + 1; + + DBG("%s: %dx%d -> %dx%d (%d)", + new_omap_state->r_overlay->name, + r_info.width, r_info.height, + r_info.out_width, r_info.out_height, r_info.screen_width); + DBG("%d,%d %pad %pad", r_info.pos_x, r_info.pos_y, + &r_info.paddr, &r_info.p_uv_addr); + } + /* and finally, update omapdss: */ - ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info, + ret = dispc_ovl_setup(priv->dispc, ovl_id, &info, omap_crtc_timings(new_state->crtc), false, omap_crtc_channel(new_state->crtc)); if (ret) { dev_err(plane->dev->dev, "Failed to setup plane %s\n", - omap_plane->name); - dispc_ovl_enable(priv->dispc, omap_plane->id, false); + plane->name); + dispc_ovl_enable(priv->dispc, ovl_id, false); return; } - dispc_ovl_enable(priv->dispc, omap_plane->id, true); + dispc_ovl_enable(priv->dispc, ovl_id, true); + + if (dual_ovl) { + ret = dispc_ovl_setup(priv->dispc, r_ovl_id, &r_info, + omap_crtc_timings(new_state->crtc), false, + omap_crtc_channel(new_state->crtc)); + if (ret) { + dev_err(plane->dev->dev, "Failed to setup plane right-overlay %s\n", + plane->name); + dispc_ovl_enable(priv->dispc, r_ovl_id, false); + dispc_ovl_enable(priv->dispc, ovl_id, false); + return; + } + + dispc_ovl_enable(priv->dispc, r_ovl_id, true); + } } static void omap_plane_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); struct omap_drm_private *priv = plane->dev->dev_private; struct omap_plane *omap_plane = to_omap_plane(plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct omap_plane_state *new_omap_state; + struct omap_plane_state *old_omap_state; + + new_omap_state = to_omap_plane_state(new_state); + old_omap_state = to_omap_plane_state(old_state); + + if (!old_omap_state->overlay) + return; new_state->rotation = DRM_MODE_ROTATE_0; new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 
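For a plane wider than one overlay can scan out, omap_framebuffer_update_scanout() (earlier in this diff) halves width and out_width, shifts one pixel between the halves when the result is odd on a YUV format so each half keeps an even width, and the atomic_update hunk above then places the right overlay at pos_x + out_width with zorder + 1. The geometry as a worked example:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int out_width = 2050 / 2;  /* 1025 per half: odd */
        unsigned int l_w = out_width, r_w = out_width;
        bool is_yuv = true;

        if (is_yuv && (out_width & 1)) {
            l_w++;                          /* left takes the spare pixel */
            r_w--;                          /* both halves now even */
        }

        unsigned int pos_x = 0;
        printf("left:  x=%u w=%u\n", pos_x, l_w);        /* x=0    w=1026 */
        printf("right: x=%u w=%u\n", pos_x + l_w, r_w);  /* x=1026 w=1024 */
        return 0;
    }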
0 : omap_plane->id; - dispc_ovl_enable(priv->dispc, omap_plane->id, false); + omap_overlay_update_state(priv, old_omap_state->overlay); + new_omap_state->overlay = NULL; + + if (is_omap_plane_dual_overlay(old_state)) { + omap_overlay_update_state(priv, old_omap_state->r_overlay); + new_omap_state->r_overlay = NULL; + } } +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) + static int omap_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, + plane); + struct omap_drm_private *priv = plane->dev->dev_private; + struct omap_plane_state *omap_state = to_omap_plane_state(new_plane_state); + struct omap_global_state *omap_overlay_global_state; struct drm_crtc_state *crtc_state; + bool new_r_hw_overlay = false; + bool new_hw_overlay = false; + u32 max_width, max_height; + struct drm_crtc *crtc; + u16 width, height; + u32 caps = 0; + u32 fourcc; + int ret; - if (!new_plane_state->fb) - return 0; + omap_overlay_global_state = omap_get_global_state(state); + if (IS_ERR(omap_overlay_global_state)) + return PTR_ERR(omap_overlay_global_state); + + dispc_ovl_get_max_size(priv->dispc, &width, &height); + max_width = width << 16; + max_height = height << 16; - /* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */ - if (WARN_ON(!new_plane_state->crtc)) + crtc = new_plane_state->crtc ? new_plane_state->crtc : plane->state->crtc; + if (!crtc) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, - new_plane_state->crtc); + crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); /* we should have a crtc state if the plane is attached to a crtc */ if (WARN_ON(!crtc_state)) return 0; - if (!crtc_state->enable) + /* + * Note: these are just sanity checks to filter out totally bad scaling + * factors. The real limits must be calculated case by case, and + * unfortunately we currently do those checks only at the commit + * phase in dispc. + */ + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + FRAC_16_16(1, 8), FRAC_16_16(8, 1), + true, true); + if (ret) + return ret; + + DBG("%s: visible %d -> %d", plane->name, + old_plane_state->visible, new_plane_state->visible); + + if (!new_plane_state->visible) { + omap_overlay_release(state, omap_state->overlay); + omap_overlay_release(state, omap_state->r_overlay); + omap_state->overlay = NULL; + omap_state->r_overlay = NULL; return 0; + } if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) return -EINVAL; @@ -136,10 +265,96 @@ static int omap_plane_atomic_check(struct drm_plane *plane, if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay) return -EINVAL; + /* Make sure dimensions are within bounds. */ + if (new_plane_state->src_h > max_height || new_plane_state->crtc_h > height) + return -EINVAL; + + + if (new_plane_state->src_w > max_width || new_plane_state->crtc_w > width) { + bool is_fourcc_yuv = new_plane_state->fb->format->is_yuv; + + if (is_fourcc_yuv && (((new_plane_state->src_w >> 16) / 2 & 1) || + new_plane_state->crtc_w / 2 & 1)) { + /* + * When calculating the split overlay width + * and it yield an odd value we will need to adjust + * the indivual width +/- 1. 
So make sure it fits + */ + if (new_plane_state->src_w <= ((2 * width - 1) << 16) && + new_plane_state->crtc_w <= (2 * width - 1)) + new_r_hw_overlay = true; + else + return -EINVAL; + } else { + if (new_plane_state->src_w <= (2 * max_width) && + new_plane_state->crtc_w <= (2 * width)) + new_r_hw_overlay = true; + else + return -EINVAL; + } + } + if (new_plane_state->rotation != DRM_MODE_ROTATE_0 && !omap_framebuffer_supports_rotation(new_plane_state->fb)) return -EINVAL; + if ((new_plane_state->src_w >> 16) != new_plane_state->crtc_w || + (new_plane_state->src_h >> 16) != new_plane_state->crtc_h) + caps |= OMAP_DSS_OVL_CAP_SCALE; + + fourcc = new_plane_state->fb->format->format; + + /* + * (re)allocate hw overlay if we don't have one or + * there is a caps mismatch + */ + if (!omap_state->overlay || (caps & ~omap_state->overlay->caps)) { + new_hw_overlay = true; + } else { + /* check supported format */ + if (!dispc_ovl_color_mode_supported(priv->dispc, omap_state->overlay->id, + fourcc)) + new_hw_overlay = true; + } + + /* + * check if we need two overlays and only have 1 or + * if we had 2 overlays but will only need 1 + */ + if ((new_r_hw_overlay && !omap_state->r_overlay) || + (!new_r_hw_overlay && omap_state->r_overlay)) + new_hw_overlay = true; + + if (new_hw_overlay) { + struct omap_hw_overlay *old_ovl = omap_state->overlay; + struct omap_hw_overlay *old_r_ovl = omap_state->r_overlay; + struct omap_hw_overlay *new_ovl = NULL; + struct omap_hw_overlay *new_r_ovl = NULL; + + omap_overlay_release(state, old_ovl); + omap_overlay_release(state, old_r_ovl); + + ret = omap_overlay_assign(state, plane, caps, fourcc, &new_ovl, + new_r_hw_overlay ? &new_r_ovl : NULL); + if (ret) { + DBG("%s: failed to assign hw_overlay", plane->name); + omap_state->overlay = NULL; + omap_state->r_overlay = NULL; + return ret; + } + + omap_state->overlay = new_ovl; + if (new_r_hw_overlay) + omap_state->r_overlay = new_r_ovl; + else + omap_state->r_overlay = NULL; + } + + DBG("plane: %s overlay_id: %d", plane->name, omap_state->overlay->id); + + if (omap_state->r_overlay) + DBG("plane: %s r_overlay_id: %d", plane->name, omap_state->r_overlay->id); + return 0; } @@ -155,7 +370,7 @@ static void omap_plane_destroy(struct drm_plane *plane) { struct omap_plane *omap_plane = to_omap_plane(plane); - DBG("%s", omap_plane->name); + DBG("%s", plane->name); drm_plane_cleanup(plane); @@ -189,11 +404,17 @@ void omap_plane_install_properties(struct drm_plane *plane, static void omap_plane_reset(struct drm_plane *plane) { struct omap_plane *omap_plane = to_omap_plane(plane); + struct omap_plane_state *omap_state; + + if (plane->state) + drm_atomic_helper_plane_destroy_state(plane, plane->state); - drm_atomic_helper_plane_reset(plane); - if (!plane->state) + omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL); + if (!omap_state) return; + __drm_atomic_helper_plane_reset(plane, &omap_state->base); + /* * Set the zpos default depending on whether we are a primary or overlay * plane. 
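The atomic_check hunk above states its scaling bounds in the 16.16 fixed-point ratio drm_atomic_helper_check_plane_state() expects, where the ratio is src/dst: FRAC_16_16(1, 8) therefore permits up to 8x upscaling, FRAC_16_16(8, 1) up to 8x downscaling, and width << 16 converts the overlay limit into the same format as the src_* coordinates. Numerically:

    #include <stdio.h>

    #define FRAC_16_16(mult, div) (((mult) << 16) / (div))

    int main(void)
    {
        printf("min=0x%05x (src/dst = 1/8, i.e. 8x upscale)\n",
               FRAC_16_16(1, 8));                        /* 0x02000 */
        printf("one=0x%05x (no scaling)\n",
               FRAC_16_16(1, 1));                        /* 0x10000 */
        printf("max=0x%05x (src/dst = 8, i.e. 8x downscale)\n",
               FRAC_16_16(8, 1));                        /* 0x80000 */
        return 0;
    }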
@@ -204,6 +425,47 @@ static void omap_plane_reset(struct drm_plane *plane) plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE; } +static struct drm_plane_state * +omap_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct omap_plane_state *state, *current_state; + + if (WARN_ON(!plane->state)) + return NULL; + + current_state = to_omap_plane_state(plane->state); + + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &state->base); + + state->overlay = current_state->overlay; + state->r_overlay = current_state->r_overlay; + + return &state->base; +} + +static void omap_plane_atomic_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + struct omap_plane_state *omap_state = to_omap_plane_state(state); + + if (omap_state->overlay) + drm_printf(p, "\toverlay=%s (caps=0x%x)\n", + omap_state->overlay->name, + omap_state->overlay->caps); + else + drm_printf(p, "\toverlay=None\n"); + if (omap_state->r_overlay) + drm_printf(p, "\tr_overlay=%s (caps=0x%x)\n", + omap_state->r_overlay->name, + omap_state->r_overlay->caps); + else + drm_printf(p, "\tr_overlay=None\n"); +} + static int omap_plane_atomic_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, @@ -239,10 +501,11 @@ static const struct drm_plane_funcs omap_plane_funcs = { .disable_plane = drm_atomic_helper_disable_plane, .reset = omap_plane_reset, .destroy = omap_plane_destroy, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_duplicate_state = omap_plane_atomic_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, .atomic_set_property = omap_plane_atomic_set_property, .atomic_get_property = omap_plane_atomic_get_property, + .atomic_print_state = omap_plane_atomic_print_state, }; static bool omap_plane_supports_yuv(struct drm_plane *plane) @@ -261,20 +524,6 @@ static bool omap_plane_supports_yuv(struct drm_plane *plane) return false; } -static const char *plane_id_to_name[] = { - [OMAP_DSS_GFX] = "gfx", - [OMAP_DSS_VIDEO1] = "vid1", - [OMAP_DSS_VIDEO2] = "vid2", - [OMAP_DSS_VIDEO3] = "vid3", -}; - -static const enum omap_plane_id plane_idx_to_id[] = { - OMAP_DSS_GFX, - OMAP_DSS_VIDEO1, - OMAP_DSS_VIDEO2, - OMAP_DSS_VIDEO3, -}; - /* initialize plane */ struct drm_plane *omap_plane_init(struct drm_device *dev, int idx, enum drm_plane_type type, @@ -284,27 +533,25 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, unsigned int num_planes = dispc_get_num_ovls(priv->dispc); struct drm_plane *plane; struct omap_plane *omap_plane; - enum omap_plane_id id; int ret; u32 nformats; const u32 *formats; - if (WARN_ON(idx >= ARRAY_SIZE(plane_idx_to_id))) + if (WARN_ON(idx >= num_planes)) return ERR_PTR(-EINVAL); - id = plane_idx_to_id[idx]; - - DBG("%s: type=%d", plane_id_to_name[id], type); - omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL); if (!omap_plane) return ERR_PTR(-ENOMEM); - formats = dispc_ovl_get_color_modes(priv->dispc, id); + omap_plane->id = idx; + + DBG("%d: type=%d", omap_plane->id, type); + DBG(" crtc_mask: 0x%04x", possible_crtcs); + + formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id); for (nformats = 0; formats[nformats]; ++nformats) ; - omap_plane->id = id; - omap_plane->name = plane_id_to_name[id]; plane = &omap_plane->base; @@ -334,8 +581,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, return plane; error: - dev_err(dev->dev, "%s(): could not create plane: %s\n", - __func__, 
plane_id_to_name[id]); + dev_err(dev->dev, "%s(): could not create plane: %d\n", + __func__, omap_plane->id); kfree(omap_plane); return NULL; diff --git a/drivers/gpu/drm/omapdrm/omap_plane.h b/drivers/gpu/drm/omapdrm/omap_plane.h index 0c28fe8ffa20..a9a33e12722a 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.h +++ b/drivers/gpu/drm/omapdrm/omap_plane.h @@ -22,5 +22,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, u32 possible_crtcs); void omap_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj); +bool is_omap_plane_dual_overlay(struct drm_plane_state *state); #endif /* __OMAPDRM_PLANE_H__ */ diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index 8338dc665301..fde0208ec01e 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm * @@ -6,15 +7,6 @@ * Andy Gross <andy.gross@ti.com> * * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - * */ #include <linux/init.h> #include <linux/module.h> diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index eb01549a6ccb..434c2861bb40 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -37,6 +37,17 @@ config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596 NT35596 1080x1920 video mode panel as found in some Asus Zenfone 2 Laser Z00T devices. +config DRM_PANEL_BOE_BF060Y8M_AJ0 + tristate "Boe BF060Y8M-AJ0 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Boe BF060Y8M-AJ0 + 5.99" AMOLED modules. The panel has a 1080x2160 resolution and + uses 24 bit RGB per pixel. It provides a MIPI DSI interface to + the host and backlight is controlled through DSI commands. + config DRM_PANEL_BOE_HIMAX8279D tristate "Boe Himax8279d panel" depends on OF @@ -141,7 +152,7 @@ config DRM_PANEL_ILITEK_ILI9341 tristate "Ilitek ILI9341 240x320 QVGA panels" depends on OF && SPI depends on DRM_KMS_HELPER - depends on DRM_KMS_CMA_HELPER + depends on DRM_GEM_CMA_HELPER depends on BACKLIGHT_CLASS_DEVICE select DRM_MIPI_DBI help @@ -189,6 +200,15 @@ config DRM_PANEL_JDI_LT070ME05000 The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses 24 bit per pixel. +config DRM_PANEL_JDI_R63452 + tristate "JDI R63452 Full HD DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the JDI R63452 + DSI command mode panel as found in Xiaomi Mi 5 Devices. + config DRM_PANEL_KHADAS_TS050 tristate "Khadas TS050 panel" depends on OF @@ -272,6 +292,17 @@ config DRM_PANEL_NOVATEK_NT35510 around the Novatek NT35510 display controller, such as some Hydis panels. 
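Several entries in this Kconfig hunk distinguish video mode panels, where the host streams pixels continuously, from command mode panels such as the JDI R63452 above, where pixel data is written into the panel's own RAM over DCS and the panel self-refreshes. In a driver that distinction is carried by mipi_dsi_device::mode_flags; a hedged sketch follows (example_configure_dsi is a made-up name, only to illustrate the flag usage):

#include <drm/drm_mipi_dsi.h>

/*
 * Hypothetical helper: the presence or absence of MIPI_DSI_MODE_VIDEO
 * is what actually selects video mode vs. command mode operation.
 */
static void example_configure_dsi(struct mipi_dsi_device *dsi, bool video)
{
	if (video)
		dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
				  MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
	else
		/* command mode: no MIPI_DSI_MODE_VIDEO bit set */
		dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
}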
+config DRM_PANEL_NOVATEK_NT35950 + tristate "Novatek NT35950 DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the panels built + around the Novatek NT35950 display controller, such as some + Sharp panels used in Sony Xperia Z5 Premium and XZ Premium + mobile phones. + config DRM_PANEL_NOVATEK_NT36672A tristate "Novatek NT36672A DSI panel" depends on OF @@ -307,6 +338,7 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO depends on OF depends on I2C depends on BACKLIGHT_CLASS_DEVICE + select CRC32 help The panel is used with different sizes LCDs, from 480x272 to 1280x800, and 24 bit per pixel. @@ -579,6 +611,16 @@ config DRM_PANEL_SONY_ACX565AKM Say Y here if you want to enable support for the Sony ACX565AKM 800x600 3.5" panel (found on the Nokia N900). +config DRM_PANEL_SONY_TULIP_TRULY_NT35521 + tristate "Sony Tulip Truly NT35521 panel" + depends on GPIOLIB && OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for the Sony Tulip + NT35521 1280x720 video mode panel as found on Sony Xperia M4 + Aqua phone. + config DRM_PANEL_TDO_TL070WSH30 tristate "TDO TL070WSH30 DSI panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index bca4cc1f2715..d99fbbce49d1 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_DRM_PANEL_ABT_Y030XX067A) += panel-abt-y030xx067a.o obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o +obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o @@ -17,6 +18,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o +obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o @@ -25,6 +27,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o +obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o @@ -59,6 +62,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o +obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o 
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o diff --git a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c index 2d8794d495d0..f043b484055b 100644 --- a/drivers/gpu/drm/panel/panel-abt-y030xx067a.c +++ b/drivers/gpu/drm/panel/panel-abt-y030xx067a.c @@ -146,8 +146,8 @@ static const struct reg_sequence y030xx067a_init_sequence[] = { { 0x09, REG09_SUB_BRIGHT_R(0x20) }, { 0x0a, REG0A_SUB_BRIGHT_B(0x20) }, { 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN }, - { 0x0c, REG0C_CONTRAST_R(0x10) }, - { 0x0d, REG0D_CONTRAST_G(0x10) }, + { 0x0c, REG0C_CONTRAST_R(0x00) }, + { 0x0d, REG0D_CONTRAST_G(0x00) }, { 0x0e, REG0E_CONTRAST_B(0x10) }, { 0x0f, 0 }, { 0x10, REG10_BRIGHT(0x7f) }, @@ -272,16 +272,14 @@ static int y030xx067a_probe(struct spi_device *spi) return -EINVAL; priv->supply = devm_regulator_get(dev, "power"); - if (IS_ERR(priv->supply)) { - dev_err(dev, "Failed to get power supply\n"); - return PTR_ERR(priv->supply); - } + if (IS_ERR(priv->supply)) + return dev_err_probe(dev, PTR_ERR(priv->supply), + "Failed to get power supply\n"); priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(priv->reset_gpio)) { - dev_err(dev, "Failed to get reset GPIO\n"); - return PTR_ERR(priv->reset_gpio); - } + if (IS_ERR(priv->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), + "Failed to get reset GPIO\n"); drm_panel_init(&priv->panel, dev, &y030xx067a_funcs, DRM_MODE_CONNECTOR_DPI); diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c new file mode 100644 index 000000000000..ef00cd67dc40 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * BOE BF060Y8M-AJ0 5.99" MIPI-DSI OLED Panel on SW43404 DriverIC + * + * Copyright (c) 2020 AngeloGioacchino Del Regno + * <angelogioacchino.delregno@somainline.org> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> +#include <video/mipi_display.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#define DCS_ALLOW_HBM_RANGE 0x0c +#define DCS_DISALLOW_HBM_RANGE 0x08 + +enum boe_bf060y8m_aj0_supplies { + BF060Y8M_VREG_VCC, + BF060Y8M_VREG_VDDIO, + BF060Y8M_VREG_VCI, + BF060Y8M_VREG_EL_VDD, + BF060Y8M_VREG_EL_VSS, + BF060Y8M_VREG_MAX +}; + +struct boe_bf060y8m_aj0 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data vregs[BF060Y8M_VREG_MAX]; + struct gpio_desc *reset_gpio; + bool prepared; +}; + +static inline +struct boe_bf060y8m_aj0 *to_boe_bf060y8m_aj0(struct drm_panel *panel) +{ + return container_of(panel, struct boe_bf060y8m_aj0, panel); +} + +#define dsi_dcs_write_seq(dsi, seq...) 
do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe) +{ + gpiod_set_value_cansleep(boe->reset_gpio, 0); + usleep_range(2000, 3000); + gpiod_set_value_cansleep(boe->reset_gpio, 1); + usleep_range(15000, 16000); + gpiod_set_value_cansleep(boe->reset_gpio, 0); + usleep_range(5000, 6000); +} + +static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe) +{ + struct mipi_dsi_device *dsi = boe->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); + dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c); + dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10); + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE); + dsi_dcs_write_seq(dsi, 0xf8, + 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(30); + + dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); + dsi_dcs_write_seq(dsi, 0xc0, + 0x08, 0x48, 0x65, 0x33, 0x33, 0x33, + 0x2a, 0x31, 0x39, 0x20, 0x09); + dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, + 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); + dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92, + 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83, + 0x5c, 0x5c, 0x5c); + dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e); + + msleep(30); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + msleep(50); + + return 0; +} + +static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe) +{ + struct mipi_dsi_device *dsi = boe->dsi; + struct device *dev = &dsi->dev; + int ret; + + /* OFF commands sent in HS mode */ + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + msleep(20); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + usleep_range(1000, 2000); + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel) +{ + struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); + struct device *dev = &boe->dsi->dev; + int ret; + + if (boe->prepared) + return 0; + + /* + * Enable EL Driving Voltage first - doing that at the beginning + * or at the end of the power sequence doesn't matter, so enable + * it here to avoid yet another usleep at the end. 
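+	 * The supplies below are likewise enabled one at a time rather than
+	 * with regulator_bulk_enable(), presumably because the power-up
+	 * sequence needs the usleep_range() delays between the individual
+	 * rails, which the bulk helper cannot insert.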
+ */ + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer); + if (ret) + return ret; + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer); + if (ret) + goto err_elvss; + + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCC].consumer); + if (ret) + goto err_vcc; + usleep_range(1000, 2000); + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer); + if (ret) + goto err_vddio; + usleep_range(500, 1000); + ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCI].consumer); + if (ret) + goto err_vci; + usleep_range(2000, 3000); + + boe_bf060y8m_aj0_reset(boe); + + ret = boe_bf060y8m_aj0_on(boe); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(boe->reset_gpio, 1); + return ret; + } + + boe->prepared = true; + return 0; + +err_vci: + regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer); +err_vddio: + regulator_disable(boe->vregs[BF060Y8M_VREG_VCC].consumer); +err_vcc: + regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer); +err_elvss: + regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer); + return ret; +} + +static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel) +{ + struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); + struct device *dev = &boe->dsi->dev; + int ret; + + if (!boe->prepared) + return 0; + + ret = boe_bf060y8m_aj0_off(boe); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(boe->reset_gpio, 1); + ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs); + + boe->prepared = false; + return 0; +} + +static const struct drm_display_mode boe_bf060y8m_aj0_mode = { + .clock = 165268, + .hdisplay = 1080, + .hsync_start = 1080 + 36, + .hsync_end = 1080 + 36 + 24, + .htotal = 1080 + 36 + 24 + 96, + .vdisplay = 2160, + .vsync_start = 2160 + 16, + .vsync_end = 2160 + 16 + 1, + .vtotal = 2160 + 16 + 1 + 15, + .width_mm = 68, /* 68.04 mm */ + .height_mm = 136, /* 136.08 mm */ +}; + +static int boe_bf060y8m_aj0_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &boe_bf060y8m_aj0_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs boe_bf060y8m_aj0_panel_funcs = { + .prepare = boe_bf060y8m_aj0_prepare, + .unprepare = boe_bf060y8m_aj0_unprepare, + .get_modes = boe_bf060y8m_aj0_get_modes, +}; + +static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); + if (ret < 0) + return ret; + + return 0; +} + +static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); + if (ret < 0) + return ret; + + return brightness & 0xff; +} + +static const struct backlight_ops boe_bf060y8m_aj0_bl_ops = { + .update_status = boe_bf060y8m_aj0_bl_update_status, + .get_brightness = boe_bf060y8m_aj0_bl_get_brightness, +}; + +static struct backlight_device * +boe_bf060y8m_aj0_create_backlight(struct mipi_dsi_device 
*dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 127, + .max_brightness = 255, + .scale = BACKLIGHT_SCALE_NON_LINEAR, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &boe_bf060y8m_aj0_bl_ops, &props); +} + +static int boe_bf060y8m_aj0_init_vregs(struct boe_bf060y8m_aj0 *boe, + struct device *dev) +{ + struct regulator *vreg; + int ret; + + boe->vregs[BF060Y8M_VREG_VCC].supply = "vcc"; + boe->vregs[BF060Y8M_VREG_VDDIO].supply = "vddio"; + boe->vregs[BF060Y8M_VREG_VCI].supply = "vci"; + boe->vregs[BF060Y8M_VREG_EL_VDD].supply = "elvdd"; + boe->vregs[BF060Y8M_VREG_EL_VSS].supply = "elvss"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(boe->vregs), + boe->vregs); + if (ret < 0) { + dev_err(dev, "Failed to get regulators: %d\n", ret); + return ret; + } + + vreg = boe->vregs[BF060Y8M_VREG_VCC].consumer; + ret = regulator_is_supported_voltage(vreg, 2700000, 3600000); + if (!ret) + return ret; + + vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer; + ret = regulator_is_supported_voltage(vreg, 1620000, 1980000); + if (!ret) + return ret; + + vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer; + ret = regulator_is_supported_voltage(vreg, 2600000, 3600000); + if (!ret) + return ret; + + vreg = boe->vregs[BF060Y8M_VREG_EL_VDD].consumer; + ret = regulator_is_supported_voltage(vreg, 4400000, 4800000); + if (!ret) + return ret; + + /* ELVSS is negative: -5.00V to -1.40V */ + vreg = boe->vregs[BF060Y8M_VREG_EL_VSS].consumer; + ret = regulator_is_supported_voltage(vreg, 1400000, 5000000); + if (!ret) + return ret; + + /* + * Set min/max rated current, known only for VCI and VDDIO and, + * in case of failure, just go on gracefully, as this step is not + * guaranteed to succeed on all regulator HW but do a debug print + * to inform the developer during debugging. + * In any case, these two supplies are also optional, so they may + * be fixed-regulator which, at the time of writing, does not + * support fake current limiting. 
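+	 * (On such fixed regulators regulator_set_current_limit() typically
+	 * returns -EINVAL because no set_current_limit op is implemented,
+	 * which is why the failures below are only logged with dev_dbg().)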
+ */ + vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer; + ret = regulator_set_current_limit(vreg, 1500, 2500); + if (ret) + dev_dbg(dev, "Current limit cannot be set on %s: %d\n", + boe->vregs[1].supply, ret); + + vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer; + ret = regulator_set_current_limit(vreg, 20000, 40000); + if (ret) + dev_dbg(dev, "Current limit cannot be set on %s: %d\n", + boe->vregs[2].supply, ret); + + return 0; +} + +static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct boe_bf060y8m_aj0 *boe; + int ret; + + boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL); + if (!boe) + return -ENOMEM; + + ret = boe_bf060y8m_aj0_init_vregs(boe, dev); + if (ret) + return dev_err_probe(dev, ret, + "Failed to initialize supplies.\n"); + + boe->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); + if (IS_ERR(boe->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(boe->reset_gpio), + "Failed to get reset-gpios\n"); + + boe->dsi = dsi; + mipi_dsi_set_drvdata(dsi, boe); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET | + MIPI_DSI_MODE_VIDEO_SYNC_PULSE | + MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_LPM; + + drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi); + if (IS_ERR(boe->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(boe->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&boe->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + return ret; + } + + return 0; +} + +static int boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi) +{ + struct boe_bf060y8m_aj0 *boe = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&boe->panel); + + return 0; +} + +static const struct of_device_id boe_bf060y8m_aj0_of_match[] = { + { .compatible = "boe,bf060y8m-aj0" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, boe_bf060y8m_aj0_of_match); + +static struct mipi_dsi_driver boe_bf060y8m_aj0_driver = { + .probe = boe_bf060y8m_aj0_probe, + .remove = boe_bf060y8m_aj0_remove, + .driver = { + .name = "panel-sw43404-boe-fhd-amoled", + .of_match_table = boe_bf060y8m_aj0_of_match, + }, +}; +module_mipi_dsi_driver(boe_bf060y8m_aj0_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>"); +MODULE_DESCRIPTION("BOE BF060Y8M-AJ0 MIPI-DSI OLED panel"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 529561b4fbbc..5fcbde789ddb 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -84,8 +84,8 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0x0D, 0x63), _INIT_DCS_CMD(0x0E, 0x91), _INIT_DCS_CMD(0x0F, 0x73), - _INIT_DCS_CMD(0x95, 0xEB), - _INIT_DCS_CMD(0x96, 0xEB), + _INIT_DCS_CMD(0x95, 0xE6), + _INIT_DCS_CMD(0x96, 0xF0), _INIT_DCS_CMD(0x30, 0x11), _INIT_DCS_CMD(0x6D, 0x66), _INIT_DCS_CMD(0x75, 0xA2), @@ -111,18 +111,18 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 
0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), - _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), + _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), _INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), - _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), + _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), _INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1), _INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29), _INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73), - _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7), + _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xA7, 0x03, 0xCF, 0x03, 0xDE, 0x03, 0xE0), _INIT_DCS_CMD(0xFF, 0x24), _INIT_DCS_CMD(0xFB, 0x01), @@ -225,6 +225,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0x7F, 0x3C), _INIT_DCS_CMD(0x82, 0x04), _INIT_DCS_CMD(0x97, 0xC0), + _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00), _INIT_DCS_CMD(0x91, 0x44), _INIT_DCS_CMD(0x92, 0xA9), @@ -332,12 +333,39 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { _INIT_DCS_CMD(0x34, 0x78), _INIT_DCS_CMD(0x35, 0x16), _INIT_DCS_CMD(0xC8, 0x04), - _INIT_DCS_CMD(0xC9, 0x80), + _INIT_DCS_CMD(0xC9, 0x9E), _INIT_DCS_CMD(0xCA, 0x4E), _INIT_DCS_CMD(0xCB, 0x00), - _INIT_DCS_CMD(0xA9, 0x4C), - _INIT_DCS_CMD(0xAA, 0x47), + _INIT_DCS_CMD(0xA9, 0x49), + _INIT_DCS_CMD(0xAA, 0x4B), + _INIT_DCS_CMD(0xAB, 0x48), + _INIT_DCS_CMD(0xAC, 0x43), + _INIT_DCS_CMD(0xAD, 0x40), + _INIT_DCS_CMD(0xAE, 0x50), + _INIT_DCS_CMD(0xAF, 0x44), + _INIT_DCS_CMD(0xB0, 0x54), + _INIT_DCS_CMD(0xB1, 0x4E), + _INIT_DCS_CMD(0xB2, 0x4D), + _INIT_DCS_CMD(0xB3, 0x4C), + _INIT_DCS_CMD(0xB4, 0x41), + _INIT_DCS_CMD(0xB5, 0x47), + _INIT_DCS_CMD(0xB6, 0x53), + _INIT_DCS_CMD(0xB7, 0x3E), + _INIT_DCS_CMD(0xB8, 0x51), + _INIT_DCS_CMD(0xB9, 0x3C), + _INIT_DCS_CMD(0xBA, 0x3B), + _INIT_DCS_CMD(0xBB, 0x46), + _INIT_DCS_CMD(0xBC, 0x45), + _INIT_DCS_CMD(0xBD, 0x55), + _INIT_DCS_CMD(0xBE, 0x3D), + _INIT_DCS_CMD(0xBF, 0x3F), + _INIT_DCS_CMD(0xC0, 0x52), + _INIT_DCS_CMD(0xC1, 0x4A), + _INIT_DCS_CMD(0xC2, 0x39), + _INIT_DCS_CMD(0xC3, 0x4F), + _INIT_DCS_CMD(0xC4, 0x3A), + _INIT_DCS_CMD(0xC5, 0x42), _INIT_DCS_CMD(0xFF, 0x27), _INIT_DCS_CMD(0xFB, 0x01), @@ -419,7 +447,7 @@ static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = { {}, }; -static const struct panel_init_cmd inx_init_cmd[] = { +static const struct panel_init_cmd inx_hj110iz_init_cmd[] = { _INIT_DCS_CMD(0xFF, 0x20), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x05, 0xD1), @@ -428,10 +456,10 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0x08, 0x4B), _INIT_DCS_CMD(0x0E, 0x91), _INIT_DCS_CMD(0x0F, 0x69), - _INIT_DCS_CMD(0x95, 0xFF), - 
_INIT_DCS_CMD(0x96, 0xFF), - _INIT_DCS_CMD(0x9D, 0x0A), - _INIT_DCS_CMD(0x9E, 0x0A), + _INIT_DCS_CMD(0x95, 0xF5), + _INIT_DCS_CMD(0x96, 0xF5), + _INIT_DCS_CMD(0x9D, 0x00), + _INIT_DCS_CMD(0x9E, 0x00), _INIT_DCS_CMD(0x69, 0x98), _INIT_DCS_CMD(0x75, 0xA2), _INIT_DCS_CMD(0x77, 0xB3), @@ -493,17 +521,17 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0x2A, 0x03), _INIT_DCS_CMD(0x2B, 0x03), - _INIT_DCS_CMD(0x2F, 0x06), + _INIT_DCS_CMD(0x2F, 0x05), _INIT_DCS_CMD(0x30, 0x32), _INIT_DCS_CMD(0x31, 0x43), - _INIT_DCS_CMD(0x33, 0x06), + _INIT_DCS_CMD(0x33, 0x05), _INIT_DCS_CMD(0x34, 0x32), _INIT_DCS_CMD(0x35, 0x43), _INIT_DCS_CMD(0x37, 0x44), _INIT_DCS_CMD(0x38, 0x40), _INIT_DCS_CMD(0x39, 0x00), - _INIT_DCS_CMD(0x3A, 0x01), - _INIT_DCS_CMD(0x3B, 0x48), + _INIT_DCS_CMD(0x3A, 0x18), + _INIT_DCS_CMD(0x3B, 0x00), _INIT_DCS_CMD(0x3D, 0x93), _INIT_DCS_CMD(0xAB, 0x44), _INIT_DCS_CMD(0xAC, 0x40), @@ -520,8 +548,8 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0x56, 0x08), _INIT_DCS_CMD(0x58, 0x21), _INIT_DCS_CMD(0x59, 0x40), - _INIT_DCS_CMD(0x5A, 0x09), - _INIT_DCS_CMD(0x5B, 0x48), + _INIT_DCS_CMD(0x5A, 0x00), + _INIT_DCS_CMD(0x5B, 0x2C), _INIT_DCS_CMD(0x5E, 0x00, 0x10), _INIT_DCS_CMD(0x5F, 0x00), @@ -558,33 +586,36 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0xEF, 0x01), _INIT_DCS_CMD(0xF0, 0x7A), + _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00), _INIT_DCS_CMD(0xFF, 0x25), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x05, 0x00), + _INIT_DCS_CMD(0x13, 0x02), + _INIT_DCS_CMD(0x14, 0xDF), _INIT_DCS_CMD(0xF1, 0x10), _INIT_DCS_CMD(0x1E, 0x00), - _INIT_DCS_CMD(0x1F, 0x09), - _INIT_DCS_CMD(0x20, 0x46), + _INIT_DCS_CMD(0x1F, 0x00), + _INIT_DCS_CMD(0x20, 0x2C), _INIT_DCS_CMD(0x25, 0x00), - _INIT_DCS_CMD(0x26, 0x09), - _INIT_DCS_CMD(0x27, 0x46), + _INIT_DCS_CMD(0x26, 0x00), + _INIT_DCS_CMD(0x27, 0x2C), _INIT_DCS_CMD(0x3F, 0x80), _INIT_DCS_CMD(0x40, 0x00), _INIT_DCS_CMD(0x43, 0x00), - _INIT_DCS_CMD(0x44, 0x09), - _INIT_DCS_CMD(0x45, 0x46), + _INIT_DCS_CMD(0x44, 0x18), + _INIT_DCS_CMD(0x45, 0x00), - _INIT_DCS_CMD(0x48, 0x09), - _INIT_DCS_CMD(0x49, 0x46), + _INIT_DCS_CMD(0x48, 0x00), + _INIT_DCS_CMD(0x49, 0x2C), _INIT_DCS_CMD(0x5B, 0x80), _INIT_DCS_CMD(0x5C, 0x00), - _INIT_DCS_CMD(0x5D, 0x01), - _INIT_DCS_CMD(0x5E, 0x46), - _INIT_DCS_CMD(0x61, 0x01), - _INIT_DCS_CMD(0x62, 0x46), + _INIT_DCS_CMD(0x5D, 0x00), + _INIT_DCS_CMD(0x5E, 0x00), + _INIT_DCS_CMD(0x61, 0x00), + _INIT_DCS_CMD(0x62, 0x2C), _INIT_DCS_CMD(0x68, 0x10), _INIT_DCS_CMD(0xFF, 0x26), _INIT_DCS_CMD(0xFB, 0x01), @@ -700,16 +731,22 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0xA3, 0x30), _INIT_DCS_CMD(0xA4, 0xC0), _INIT_DCS_CMD(0xE8, 0x00), + _INIT_DCS_CMD(0x97, 0x3C), + _INIT_DCS_CMD(0x98, 0x02), + _INIT_DCS_CMD(0x99, 0x95), + _INIT_DCS_CMD(0x9A, 0x06), + _INIT_DCS_CMD(0x9B, 0x00), + _INIT_DCS_CMD(0x9C, 0x0B), + _INIT_DCS_CMD(0x9D, 0x0A), + _INIT_DCS_CMD(0x9E, 0x90), _INIT_DCS_CMD(0xFF, 0xF0), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x3A, 0x08), _INIT_DCS_CMD(0xFF, 0xD0), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x00, 0x33), - _INIT_DCS_CMD(0x02, 0x77), _INIT_DCS_CMD(0x08, 0x01), _INIT_DCS_CMD(0x09, 0xBF), - _INIT_DCS_CMD(0x28, 0x30), _INIT_DCS_CMD(0x2F, 0x33), _INIT_DCS_CMD(0xFF, 0x23), _INIT_DCS_CMD(0xFB, 0x01), @@ -718,6 +755,9 @@ static const struct panel_init_cmd inx_init_cmd[] = { _INIT_DCS_CMD(0xFF, 0x20), _INIT_DCS_CMD(0xFB, 0x01), _INIT_DCS_CMD(0x30, 0x00), + _INIT_DCS_CMD(0xFF, 0x24), + _INIT_DCS_CMD(0x5C, 
0x88), + _INIT_DCS_CMD(0x5D, 0x08), _INIT_DCS_CMD(0xFF, 0x10), _INIT_DCS_CMD(0xB9, 0x01), _INIT_DCS_CMD(0xFF, 0x20), @@ -1312,7 +1352,7 @@ static const struct panel_desc inx_hj110iz_desc = { | MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_VIDEO_BURST, - .init_cmds = inx_init_cmd, + .init_cmds = inx_hj110iz_init_cmd, }; static const struct drm_display_mode boe_tv101wum_nl6_default_mode = { diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c index da4a69067e18..b58cb064975f 100644 --- a/drivers/gpu/drm/panel/panel-dsi-cm.c +++ b/drivers/gpu/drm/panel/panel-dsi-cm.c @@ -248,7 +248,7 @@ static ssize_t num_dsi_errors_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", errors); + return sysfs_emit(buf, "%d\n", errors); } static ssize_t hw_revision_show(struct device *dev, @@ -268,7 +268,7 @@ static ssize_t hw_revision_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); + return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3); } static DEVICE_ATTR_RO(num_dsi_errors); diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index fc03046de134..176ef0c3cc1d 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -196,10 +196,10 @@ struct edp_panel_entry { /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */ u32 panel_id; - /* @delay: The power sequencing delays needed for this panel. */ + /** @delay: The power sequencing delays needed for this panel. */ const struct panel_delay *delay; - /* @name: Name of this panel (for printing to logs). */ + /** @name: Name of this panel (for printing to logs). */ const char *name; }; diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c index 2a602aee61c3..cb0bb3076099 100644 --- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c +++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c @@ -456,16 +456,13 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi) ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(&dsi->dev, "Couldn't get regulators\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(&dsi->dev, ret, "Couldn't get regulators\n"); ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset)) { - dev_err(&dsi->dev, "Couldn't get our reset GPIO\n"); - return PTR_ERR(ctx->reset); - } + if (IS_ERR(ctx->reset)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), + "Couldn't get our reset GPIO\n"); drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c index 581661b506f8..a9cd7135cb51 100644 --- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c +++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c @@ -200,22 +200,19 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) DRM_MODE_CONNECTOR_DSI); ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd"); - if (IS_ERR(ctx->dvdd)) { - dev_err(&dsi->dev, "Couldn't get dvdd regulator\n"); - return PTR_ERR(ctx->dvdd); - } + if (IS_ERR(ctx->dvdd)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd), + "Couldn't get dvdd regulator\n"); ctx->avdd = devm_regulator_get(&dsi->dev, "avdd"); - if (IS_ERR(ctx->avdd)) { - dev_err(&dsi->dev, "Couldn't get 
avdd regulator\n"); - return PTR_ERR(ctx->avdd); - } + if (IS_ERR(ctx->avdd)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->avdd), + "Couldn't get avdd regulator\n"); ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset)) { - dev_err(&dsi->dev, "Couldn't get our reset GPIO\n"); - return PTR_ERR(ctx->reset); - } + if (IS_ERR(ctx->reset)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), + "Couldn't get our reset GPIO\n"); ret = drm_panel_of_backlight(&ctx->panel); if (ret) @@ -227,7 +224,13 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; } static int feiyang_dsi_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 0145129d7c66..ba30d11547ad 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -42,6 +42,7 @@ struct ili9881c_desc { const struct ili9881c_instr *init; const size_t init_length; const struct drm_display_mode *mode; + const unsigned long mode_flags; }; struct ili9881c { @@ -51,6 +52,8 @@ struct ili9881c { struct regulator *power; struct gpio_desc *reset; + + enum drm_panel_orientation orientation; }; #define ILI9881C_SWITCH_PAGE_INSTR(_page) \ @@ -453,6 +456,213 @@ static const struct ili9881c_instr k101_im2byl02_init[] = { ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */ }; +static const struct ili9881c_instr w552946ab_init[] = { + ILI9881C_SWITCH_PAGE_INSTR(3), + ILI9881C_COMMAND_INSTR(0x01, 0x00), + ILI9881C_COMMAND_INSTR(0x02, 0x00), + ILI9881C_COMMAND_INSTR(0x03, 0x53), + ILI9881C_COMMAND_INSTR(0x04, 0x53), + ILI9881C_COMMAND_INSTR(0x05, 0x13), + ILI9881C_COMMAND_INSTR(0x06, 0x04), + ILI9881C_COMMAND_INSTR(0x07, 0x02), + ILI9881C_COMMAND_INSTR(0x08, 0x02), + ILI9881C_COMMAND_INSTR(0x09, 0x00), + ILI9881C_COMMAND_INSTR(0x0A, 0x00), + ILI9881C_COMMAND_INSTR(0x0B, 0x00), + ILI9881C_COMMAND_INSTR(0x0C, 0x00), + ILI9881C_COMMAND_INSTR(0x0D, 0x00), + ILI9881C_COMMAND_INSTR(0x0E, 0x00), + ILI9881C_COMMAND_INSTR(0x0F, 0x00), + + ILI9881C_COMMAND_INSTR(0x10, 0x00), + ILI9881C_COMMAND_INSTR(0x11, 0x00), + ILI9881C_COMMAND_INSTR(0x12, 0x00), + ILI9881C_COMMAND_INSTR(0x13, 0x00), + ILI9881C_COMMAND_INSTR(0x14, 0x00), + ILI9881C_COMMAND_INSTR(0x15, 0x08), + ILI9881C_COMMAND_INSTR(0x16, 0x10), + ILI9881C_COMMAND_INSTR(0x17, 0x00), + ILI9881C_COMMAND_INSTR(0x18, 0x08), + ILI9881C_COMMAND_INSTR(0x19, 0x00), + ILI9881C_COMMAND_INSTR(0x1A, 0x00), + ILI9881C_COMMAND_INSTR(0x1B, 0x00), + ILI9881C_COMMAND_INSTR(0x1C, 0x00), + ILI9881C_COMMAND_INSTR(0x1D, 0x00), + ILI9881C_COMMAND_INSTR(0x1E, 0xC0), + ILI9881C_COMMAND_INSTR(0x1F, 0x80), + + ILI9881C_COMMAND_INSTR(0x20, 0x02), + ILI9881C_COMMAND_INSTR(0x21, 0x09), + ILI9881C_COMMAND_INSTR(0x22, 0x00), + ILI9881C_COMMAND_INSTR(0x23, 0x00), + ILI9881C_COMMAND_INSTR(0x24, 0x00), + ILI9881C_COMMAND_INSTR(0x25, 0x00), + ILI9881C_COMMAND_INSTR(0x26, 0x00), + ILI9881C_COMMAND_INSTR(0x27, 0x00), + ILI9881C_COMMAND_INSTR(0x28, 0x55), + ILI9881C_COMMAND_INSTR(0x29, 0x03), + ILI9881C_COMMAND_INSTR(0x2A, 0x00), + ILI9881C_COMMAND_INSTR(0x2B, 0x00), + ILI9881C_COMMAND_INSTR(0x2C, 0x00), + ILI9881C_COMMAND_INSTR(0x2D, 0x00), + ILI9881C_COMMAND_INSTR(0x2E, 0x00), + ILI9881C_COMMAND_INSTR(0x2F, 0x00), + + ILI9881C_COMMAND_INSTR(0x30, 0x00), + 
ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x32, 0x00), + ILI9881C_COMMAND_INSTR(0x33, 0x00), + ILI9881C_COMMAND_INSTR(0x34, 0x04), + ILI9881C_COMMAND_INSTR(0x35, 0x05), + ILI9881C_COMMAND_INSTR(0x36, 0x05), + ILI9881C_COMMAND_INSTR(0x37, 0x00), + ILI9881C_COMMAND_INSTR(0x38, 0x3C), + ILI9881C_COMMAND_INSTR(0x39, 0x35), + ILI9881C_COMMAND_INSTR(0x3A, 0x00), + ILI9881C_COMMAND_INSTR(0x3B, 0x40), + ILI9881C_COMMAND_INSTR(0x3C, 0x00), + ILI9881C_COMMAND_INSTR(0x3D, 0x00), + ILI9881C_COMMAND_INSTR(0x3E, 0x00), + ILI9881C_COMMAND_INSTR(0x3F, 0x00), + + ILI9881C_COMMAND_INSTR(0x40, 0x00), + ILI9881C_COMMAND_INSTR(0x41, 0x88), + ILI9881C_COMMAND_INSTR(0x42, 0x00), + ILI9881C_COMMAND_INSTR(0x43, 0x00), + ILI9881C_COMMAND_INSTR(0x44, 0x1F), + + ILI9881C_COMMAND_INSTR(0x50, 0x01), + ILI9881C_COMMAND_INSTR(0x51, 0x23), + ILI9881C_COMMAND_INSTR(0x52, 0x45), + ILI9881C_COMMAND_INSTR(0x53, 0x67), + ILI9881C_COMMAND_INSTR(0x54, 0x89), + ILI9881C_COMMAND_INSTR(0x55, 0xaB), + ILI9881C_COMMAND_INSTR(0x56, 0x01), + ILI9881C_COMMAND_INSTR(0x57, 0x23), + ILI9881C_COMMAND_INSTR(0x58, 0x45), + ILI9881C_COMMAND_INSTR(0x59, 0x67), + ILI9881C_COMMAND_INSTR(0x5A, 0x89), + ILI9881C_COMMAND_INSTR(0x5B, 0xAB), + ILI9881C_COMMAND_INSTR(0x5C, 0xCD), + ILI9881C_COMMAND_INSTR(0x5D, 0xEF), + ILI9881C_COMMAND_INSTR(0x5E, 0x03), + ILI9881C_COMMAND_INSTR(0x5F, 0x14), + + ILI9881C_COMMAND_INSTR(0x60, 0x15), + ILI9881C_COMMAND_INSTR(0x61, 0x0C), + ILI9881C_COMMAND_INSTR(0x62, 0x0D), + ILI9881C_COMMAND_INSTR(0x63, 0x0E), + ILI9881C_COMMAND_INSTR(0x64, 0x0F), + ILI9881C_COMMAND_INSTR(0x65, 0x10), + ILI9881C_COMMAND_INSTR(0x66, 0x11), + ILI9881C_COMMAND_INSTR(0x67, 0x08), + ILI9881C_COMMAND_INSTR(0x68, 0x02), + ILI9881C_COMMAND_INSTR(0x69, 0x0A), + ILI9881C_COMMAND_INSTR(0x6A, 0x02), + ILI9881C_COMMAND_INSTR(0x6B, 0x02), + ILI9881C_COMMAND_INSTR(0x6C, 0x02), + ILI9881C_COMMAND_INSTR(0x6D, 0x02), + ILI9881C_COMMAND_INSTR(0x6E, 0x02), + ILI9881C_COMMAND_INSTR(0x6F, 0x02), + + ILI9881C_COMMAND_INSTR(0x70, 0x02), + ILI9881C_COMMAND_INSTR(0x71, 0x02), + ILI9881C_COMMAND_INSTR(0x72, 0x06), + ILI9881C_COMMAND_INSTR(0x73, 0x02), + ILI9881C_COMMAND_INSTR(0x74, 0x02), + ILI9881C_COMMAND_INSTR(0x75, 0x14), + ILI9881C_COMMAND_INSTR(0x76, 0x15), + ILI9881C_COMMAND_INSTR(0x77, 0x0F), + ILI9881C_COMMAND_INSTR(0x78, 0x0E), + ILI9881C_COMMAND_INSTR(0x79, 0x0D), + ILI9881C_COMMAND_INSTR(0x7A, 0x0C), + ILI9881C_COMMAND_INSTR(0x7B, 0x11), + ILI9881C_COMMAND_INSTR(0x7C, 0x10), + ILI9881C_COMMAND_INSTR(0x7D, 0x06), + ILI9881C_COMMAND_INSTR(0x7E, 0x02), + ILI9881C_COMMAND_INSTR(0x7F, 0x0A), + + ILI9881C_COMMAND_INSTR(0x80, 0x02), + ILI9881C_COMMAND_INSTR(0x81, 0x02), + ILI9881C_COMMAND_INSTR(0x82, 0x02), + ILI9881C_COMMAND_INSTR(0x83, 0x02), + ILI9881C_COMMAND_INSTR(0x84, 0x02), + ILI9881C_COMMAND_INSTR(0x85, 0x02), + ILI9881C_COMMAND_INSTR(0x86, 0x02), + ILI9881C_COMMAND_INSTR(0x87, 0x02), + ILI9881C_COMMAND_INSTR(0x88, 0x08), + ILI9881C_COMMAND_INSTR(0x89, 0x02), + ILI9881C_COMMAND_INSTR(0x8A, 0x02), + + ILI9881C_SWITCH_PAGE_INSTR(4), + ILI9881C_COMMAND_INSTR(0x00, 0x80), + ILI9881C_COMMAND_INSTR(0x70, 0x00), + ILI9881C_COMMAND_INSTR(0x71, 0x00), + ILI9881C_COMMAND_INSTR(0x66, 0xFE), + ILI9881C_COMMAND_INSTR(0x82, 0x15), + ILI9881C_COMMAND_INSTR(0x84, 0x15), + ILI9881C_COMMAND_INSTR(0x85, 0x15), + ILI9881C_COMMAND_INSTR(0x3a, 0x24), + ILI9881C_COMMAND_INSTR(0x32, 0xAC), + ILI9881C_COMMAND_INSTR(0x8C, 0x80), + ILI9881C_COMMAND_INSTR(0x3C, 0xF5), + ILI9881C_COMMAND_INSTR(0x88, 0x33), + + ILI9881C_SWITCH_PAGE_INSTR(1), + 
ILI9881C_COMMAND_INSTR(0x22, 0x0A), + ILI9881C_COMMAND_INSTR(0x31, 0x00), + ILI9881C_COMMAND_INSTR(0x53, 0x78), + ILI9881C_COMMAND_INSTR(0x50, 0x5B), + ILI9881C_COMMAND_INSTR(0x51, 0x5B), + ILI9881C_COMMAND_INSTR(0x60, 0x20), + ILI9881C_COMMAND_INSTR(0x61, 0x00), + ILI9881C_COMMAND_INSTR(0x62, 0x0D), + ILI9881C_COMMAND_INSTR(0x63, 0x00), + + ILI9881C_COMMAND_INSTR(0xA0, 0x00), + ILI9881C_COMMAND_INSTR(0xA1, 0x10), + ILI9881C_COMMAND_INSTR(0xA2, 0x1C), + ILI9881C_COMMAND_INSTR(0xA3, 0x13), + ILI9881C_COMMAND_INSTR(0xA4, 0x15), + ILI9881C_COMMAND_INSTR(0xA5, 0x26), + ILI9881C_COMMAND_INSTR(0xA6, 0x1A), + ILI9881C_COMMAND_INSTR(0xA7, 0x1D), + ILI9881C_COMMAND_INSTR(0xA8, 0x67), + ILI9881C_COMMAND_INSTR(0xA9, 0x1C), + ILI9881C_COMMAND_INSTR(0xAA, 0x29), + ILI9881C_COMMAND_INSTR(0xAB, 0x5B), + ILI9881C_COMMAND_INSTR(0xAC, 0x26), + ILI9881C_COMMAND_INSTR(0xAD, 0x28), + ILI9881C_COMMAND_INSTR(0xAE, 0x5C), + ILI9881C_COMMAND_INSTR(0xAF, 0x30), + ILI9881C_COMMAND_INSTR(0xB0, 0x31), + ILI9881C_COMMAND_INSTR(0xB1, 0x2E), + ILI9881C_COMMAND_INSTR(0xB2, 0x32), + ILI9881C_COMMAND_INSTR(0xB3, 0x00), + + ILI9881C_COMMAND_INSTR(0xC0, 0x00), + ILI9881C_COMMAND_INSTR(0xC1, 0x10), + ILI9881C_COMMAND_INSTR(0xC2, 0x1C), + ILI9881C_COMMAND_INSTR(0xC3, 0x13), + ILI9881C_COMMAND_INSTR(0xC4, 0x15), + ILI9881C_COMMAND_INSTR(0xC5, 0x26), + ILI9881C_COMMAND_INSTR(0xC6, 0x1A), + ILI9881C_COMMAND_INSTR(0xC7, 0x1D), + ILI9881C_COMMAND_INSTR(0xC8, 0x67), + ILI9881C_COMMAND_INSTR(0xC9, 0x1C), + ILI9881C_COMMAND_INSTR(0xCA, 0x29), + ILI9881C_COMMAND_INSTR(0xCB, 0x5B), + ILI9881C_COMMAND_INSTR(0xCC, 0x26), + ILI9881C_COMMAND_INSTR(0xCD, 0x28), + ILI9881C_COMMAND_INSTR(0xCE, 0x5C), + ILI9881C_COMMAND_INSTR(0xCF, 0x30), + ILI9881C_COMMAND_INSTR(0xD0, 0x31), + ILI9881C_COMMAND_INSTR(0xD1, 0x2E), + ILI9881C_COMMAND_INSTR(0xD2, 0x32), + ILI9881C_COMMAND_INSTR(0xD3, 0x00), + ILI9881C_SWITCH_PAGE_INSTR(0), +}; + static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel) { return container_of(panel, struct ili9881c, panel); @@ -590,19 +800,36 @@ static const struct drm_display_mode k101_im2byl02_default_mode = { .clock = 69700, .hdisplay = 800, - .hsync_start = 800 + 6, - .hsync_end = 800 + 6 + 15, - .htotal = 800 + 6 + 15 + 16, + .hsync_start = 800 + 52, + .hsync_end = 800 + 52 + 8, + .htotal = 800 + 52 + 8 + 48, .vdisplay = 1280, - .vsync_start = 1280 + 8, - .vsync_end = 1280 + 8 + 48, - .vtotal = 1280 + 8 + 48 + 52, + .vsync_start = 1280 + 16, + .vsync_end = 1280 + 16 + 6, + .vtotal = 1280 + 16 + 6 + 15, .width_mm = 135, .height_mm = 217, }; +static const struct drm_display_mode w552946aba_default_mode = { + .clock = 64000, + + .hdisplay = 720, + .hsync_start = 720 + 40, + .hsync_end = 720 + 40 + 10, + .htotal = 720 + 40 + 10 + 40, + + .vdisplay = 1280, + .vsync_start = 1280 + 22, + .vsync_end = 1280 + 22 + 4, + .vtotal = 1280 + 22 + 4 + 11, + + .width_mm = 68, + .height_mm = 121, +}; + static int ili9881c_get_modes(struct drm_panel *panel, struct drm_connector *connector) { @@ -626,6 +853,8 @@ static int ili9881c_get_modes(struct drm_panel *panel, connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; + drm_connector_set_panel_orientation(connector, ctx->orientation); + return 1; } @@ -653,15 +882,20 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) DRM_MODE_CONNECTOR_DSI); ctx->power = devm_regulator_get(&dsi->dev, "power"); - if (IS_ERR(ctx->power)) { - dev_err(&dsi->dev, "Couldn't get our power regulator\n"); - return PTR_ERR(ctx->power); - } - - 
ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset)) { - dev_err(&dsi->dev, "Couldn't get our reset GPIO\n"); - return PTR_ERR(ctx->reset); + if (IS_ERR(ctx->power)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power), + "Couldn't get our power regulator\n"); + + ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->reset)) + return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), + "Couldn't get our reset GPIO\n"); + + ret = of_drm_get_panel_orientation(dsi->dev.of_node, &ctx->orientation); + if (ret) { + dev_err(&dsi->dev, "%pOF: failed to get orientation: %d\n", + dsi->dev.of_node, ret); + return ret; } ret = drm_panel_of_backlight(&ctx->panel); @@ -670,7 +904,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) drm_panel_add(&ctx->panel); - dsi->mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE; + dsi->mode_flags = ctx->desc->mode_flags; dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; @@ -691,17 +925,28 @@ static const struct ili9881c_desc lhr050h41_desc = { .init = lhr050h41_init, .init_length = ARRAY_SIZE(lhr050h41_init), .mode = &lhr050h41_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, }; static const struct ili9881c_desc k101_im2byl02_desc = { .init = k101_im2byl02_init, .init_length = ARRAY_SIZE(k101_im2byl02_init), .mode = &k101_im2byl02_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, +}; + +static const struct ili9881c_desc w552946aba_desc = { + .init = w552946ab_init, + .init_length = ARRAY_SIZE(w552946ab_init), + .mode = &w552946aba_default_mode, + .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; static const struct of_device_id ili9881c_of_match[] = { { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc }, { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, + { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, { } }; MODULE_DEVICE_TABLE(of, ili9881c_of_match); diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c index 34b98f70bd22..c558de3f99be 100644 --- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c +++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c @@ -198,16 +198,14 @@ static int ej030na_probe(struct spi_device *spi) return -EINVAL; priv->supply = devm_regulator_get(dev, "power"); - if (IS_ERR(priv->supply)) { - dev_err(dev, "Failed to get power supply\n"); - return PTR_ERR(priv->supply); - } + if (IS_ERR(priv->supply)) + return dev_err_probe(dev, PTR_ERR(priv->supply), + "Failed to get power supply\n"); priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(priv->reset_gpio)) { - dev_err(dev, "Failed to get reset GPIO\n"); - return PTR_ERR(priv->reset_gpio); - } + if (IS_ERR(priv->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), + "Failed to get reset GPIO\n"); drm_panel_init(&priv->panel, dev, &ej030na_funcs, DRM_MODE_CONNECTOR_DPI); diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index aea316225391..f194b62e290c 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -484,6 +484,7 @@ static void innolux_panel_del(struct innolux_panel *innolux) static int innolux_panel_probe(struct mipi_dsi_device *dsi) { const struct panel_desc *desc; + struct innolux_panel *innolux; int err; desc = of_device_get_match_data(&dsi->dev); @@ -495,7 +496,14 
@@ static int innolux_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + innolux = mipi_dsi_get_drvdata(dsi); + innolux_panel_del(innolux); + return err; + } + + return 0; } static int innolux_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c new file mode 100644 index 000000000000..31eafbc38ec0 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 Raffaele Tranquillini <raffaele.tranquillini@gmail.com> + * + * Generated using linux-mdss-dsi-panel-driver-generator from Lineage OS device tree: + * https://github.com/LineageOS/android_kernel_xiaomi_msm8996/blob/lineage-18.1/arch/arm/boot/dts/qcom/a1-msm8996-mtp.dtsi + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct jdi_fhd_r63452 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct gpio_desc *reset_gpio; + bool prepared; +}; + +static inline struct jdi_fhd_r63452 *to_jdi_fhd_r63452(struct drm_panel *panel) +{ + return container_of(panel, struct jdi_fhd_r63452, panel); +} + +#define dsi_generic_write_seq(dsi, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +#define dsi_dcs_write_seq(dsi, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + dsi_generic_write_seq(dsi, 0xb0, 0x00); + dsi_generic_write_seq(dsi, 0xd6, 0x01); + dsi_generic_write_seq(dsi, 0xec, + 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b, + 0x13, 0x15, 0x68, 0x0b, 0xb5); + dsi_generic_write_seq(dsi, 0xb0, 0x03); + + ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (ret < 0) { + dev_err(dev, "Failed to set tear on: %d\n", ret); + return ret; + } + + dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00); + + ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77); + if (ret < 0) { + dev_err(dev, "Failed to set pixel format: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 0x0437); + if (ret < 0) { + dev_err(dev, "Failed to set column address: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 0x077f); + if (ret < 0) { + dev_err(dev, "Failed to set page address: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x0000); + if (ret < 0) { + dev_err(dev, "Failed to set tear scanline: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff); + if (ret < 0) { + dev_err(dev, "Failed to set display brightness: %d\n", ret); + 
return ret; + } + + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24); + dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00); + dsi_dcs_write_seq(dsi, 0x84, 0x00); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + msleep(20); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(80); + + dsi_generic_write_seq(dsi, 0xb0, 0x04); + dsi_dcs_write_seq(dsi, 0x84, 0x00); + dsi_generic_write_seq(dsi, 0xc8, 0x11); + dsi_generic_write_seq(dsi, 0xb0, 0x03); + + return 0; +} + +static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + dsi_generic_write_seq(dsi, 0xb0, 0x00); + dsi_generic_write_seq(dsi, 0xd6, 0x01); + dsi_generic_write_seq(dsi, 0xec, + 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b, + 0x13, 0x15, 0x68, 0x0b, 0x95); + dsi_generic_write_seq(dsi, 0xb0, 0x03); + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + usleep_range(2000, 3000); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(120); + + return 0; +} + +static int jdi_fhd_r63452_prepare(struct drm_panel *panel) +{ + struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (ctx->prepared) + return 0; + + jdi_fhd_r63452_reset(ctx); + + ret = jdi_fhd_r63452_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + return ret; + } + + ctx->prepared = true; + return 0; +} + +static int jdi_fhd_r63452_unprepare(struct drm_panel *panel) +{ + struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (!ctx->prepared) + return 0; + + ret = jdi_fhd_r63452_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + + ctx->prepared = false; + return 0; +} + +static const struct drm_display_mode jdi_fhd_r63452_mode = { + .clock = (1080 + 120 + 16 + 40) * (1920 + 4 + 2 + 4) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 120, + .hsync_end = 1080 + 120 + 16, + .htotal = 1080 + 120 + 16 + 40, + .vdisplay = 1920, + .vsync_start = 1920 + 4, + .vsync_end = 1920 + 4 + 2, + .vtotal = 1920 + 4 + 2 + 4, + .width_mm = 64, + .height_mm = 114, +}; + +static int jdi_fhd_r63452_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &jdi_fhd_r63452_mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs jdi_fhd_r63452_panel_funcs = { + .prepare = jdi_fhd_r63452_prepare, + .unprepare = jdi_fhd_r63452_unprepare, + .get_modes = jdi_fhd_r63452_get_modes, +}; + +static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + 
struct jdi_fhd_r63452 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&ctx->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + return ret; + } + + return 0; +} + +static int jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi) +{ + struct jdi_fhd_r63452 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id jdi_fhd_r63452_of_match[] = { + { .compatible = "jdi,fhd-r63452" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, jdi_fhd_r63452_of_match); + +static struct mipi_dsi_driver jdi_fhd_r63452_driver = { + .probe = jdi_fhd_r63452_probe, + .remove = jdi_fhd_r63452_remove, + .driver = { + .name = "panel-jdi-fhd-r63452", + .of_match_table = jdi_fhd_r63452_of_match, + }, +}; +module_mipi_dsi_driver(jdi_fhd_r63452_driver); + +MODULE_AUTHOR("Raffaele Tranquillini <raffaele.tranquillini@gmail.com>"); +MODULE_DESCRIPTION("DRM driver for JDI FHD R63452 DSI panel, command mode"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index 733010b5e4f5..3c86ad262d5e 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -473,7 +473,13 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi) if (ret < 0) return ret; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + jdi_panel_del(jdi); + return ret; + } + + return 0; } static int jdi_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 86e4213e8bb1..daccb1fd5fda 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -406,7 +406,13 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + kingdisplay_panel_del(kingdisplay); + return err; + } + + return 0; } static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 59a8d99e777d..27a1c9923b09 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -20,6 +20,7 @@ #include <video/videomode.h> #include <drm/drm_crtc.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> struct panel_lvds { @@ -116,7 +117,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) { struct device_node *np = lvds->dev->of_node; struct display_timing timing; - const char *mapping; int ret; ret = of_drm_get_panel_orientation(np, 
&lvds->orientation); @@ -149,24 +149,14 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) of_property_read_string(np, "label", &lvds->label); - ret = of_property_read_string(np, "data-mapping", &mapping); + ret = drm_of_lvds_get_data_mapping(np); if (ret < 0) { dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n", np, "data-mapping"); - return -ENODEV; + return ret; } - if (!strcmp(mapping, "jeida-18")) { - lvds->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; - } else if (!strcmp(mapping, "jeida-24")) { - lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; - } else if (!strcmp(mapping, "vesa-24")) { - lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; - } else { - dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n", - np, "data-mapping"); - return -EINVAL; - } + lvds->bus_format = ret; lvds->data_mirror = of_property_read_bool(np, "data-mirror"); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c new file mode 100644 index 000000000000..288c7fa83ecc --- /dev/null +++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Novatek NT35950 DriverIC panels driver + * + * Copyright (c) 2021 AngeloGioacchino Del Regno + * <angelogioacchino.delregno@somainline.org> + */ +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#define MCS_CMD_MAUCCTR 0xf0 /* Manufacturer command enable */ +#define MCS_PARAM_SCALER_FUNCTION 0x58 /* Scale-up function */ +#define MCS_PARAM_SCALEUP_MODE 0xc9 + #define MCS_SCALEUP_SIMPLE 0x0 + #define MCS_SCALEUP_BILINEAR BIT(0) + #define MCS_SCALEUP_DUPLICATE (BIT(0) | BIT(4)) + +/* VESA Display Stream Compression param */ +#define MCS_PARAM_VESA_DSC_ON 0x03 + +/* Data Compression mode */ +#define MCS_PARAM_DATA_COMPRESSION 0x90 + #define MCS_DATA_COMPRESSION_NONE 0x00 + #define MCS_DATA_COMPRESSION_FBC 0x02 + #define MCS_DATA_COMPRESSION_DSC 0x03 + +/* Display Output control */ +#define MCS_PARAM_DISP_OUTPUT_CTRL 0xb4 + #define MCS_DISP_OUT_SRAM_EN BIT(0) + #define MCS_DISP_OUT_VIDEO_MODE BIT(4) + +/* VESA Display Stream Compression setting */ +#define MCS_PARAM_VESA_DSC_SETTING 0xc0 + +/* SubPixel Rendering (SPR) */ +#define MCS_PARAM_SPR_EN 0xe3 +#define MCS_PARAM_SPR_MODE 0xef + #define MCS_SPR_MODE_YYG_RAINBOW_RGB 0x01 + +#define NT35950_VREG_MAX 4 + +struct nt35950 { + struct drm_panel panel; + struct drm_connector *connector; + struct mipi_dsi_device *dsi[2]; + struct regulator_bulk_data vregs[NT35950_VREG_MAX]; + struct gpio_desc *reset_gpio; + const struct nt35950_panel_desc *desc; + + int cur_mode; + u8 last_page; + bool prepared; +}; + +struct nt35950_panel_mode { + const struct drm_display_mode mode; + + bool enable_sram; + bool is_video_mode; + u8 scaler_on; + u8 scaler_mode; + u8 compression; + u8 spr_en; + u8 spr_mode; +}; + +struct nt35950_panel_desc { + const char *model_name; + const struct mipi_dsi_device_info dsi_info; + const struct nt35950_panel_mode *mode_data; + + bool is_dual_dsi; + u8 num_lanes; + u8 num_modes; +}; + +static inline struct nt35950 *to_nt35950(struct drm_panel *panel) +{ + return container_of(panel, struct nt35950, panel); +} + +#define dsi_dcs_write_seq(dsi, seq...) 
do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void nt35950_reset(struct nt35950 *nt) +{ + gpiod_set_value_cansleep(nt->reset_gpio, 1); + usleep_range(12000, 13000); + gpiod_set_value_cansleep(nt->reset_gpio, 0); + usleep_range(300, 400); + gpiod_set_value_cansleep(nt->reset_gpio, 1); + usleep_range(12000, 13000); +} + +/* + * nt35950_set_cmd2_page - Select manufacturer control (CMD2) page + * @nt: Main driver structure + * @page: Page number (0-7) + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_cmd2_page(struct nt35950 *nt, u8 page) +{ + const u8 mauc_cmd2_page[] = { MCS_CMD_MAUCCTR, 0x55, 0xaa, 0x52, + 0x08, page }; + int ret; + + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], mauc_cmd2_page, + ARRAY_SIZE(mauc_cmd2_page)); + if (ret < 0) + return ret; + + nt->last_page = page; + return 0; +} + +/* + * nt35950_set_data_compression - Set data compression mode + * @nt: Main driver structure + * @comp_mode: Compression mode + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_data_compression(struct nt35950 *nt, u8 comp_mode) +{ + u8 cmd_data_compression[] = { MCS_PARAM_DATA_COMPRESSION, comp_mode }; + u8 cmd_vesa_dsc_on[] = { MCS_PARAM_VESA_DSC_ON, !!comp_mode }; + u8 cmd_vesa_dsc_setting[] = { MCS_PARAM_VESA_DSC_SETTING, 0x03 }; + u8 last_page = nt->last_page; + int ret; + + /* Set CMD2 Page 0 if we're not there yet */ + if (last_page != 0) { + ret = nt35950_set_cmd2_page(nt, 0); + if (ret < 0) + return ret; + } + + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_data_compression, + ARRAY_SIZE(cmd_data_compression)); + if (ret < 0) + return ret; + + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_on, + ARRAY_SIZE(cmd_vesa_dsc_on)); + if (ret < 0) + return ret; + + /* Set the vesa dsc setting on Page 4 */ + ret = nt35950_set_cmd2_page(nt, 4); + if (ret < 0) + return ret; + + /* Display Stream Compression setting, always 0x03 */ + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_setting, + ARRAY_SIZE(cmd_vesa_dsc_setting)); + if (ret < 0) + return ret; + + /* Get back to the previously set page */ + return nt35950_set_cmd2_page(nt, last_page); +} + +/* + * nt35950_set_scaler - Enable/disable resolution upscaling + * @nt: Main driver structure + * @scale_up: Scale up function control + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_scaler(struct nt35950 *nt, u8 scale_up) +{ + u8 cmd_scaler[] = { MCS_PARAM_SCALER_FUNCTION, scale_up }; + + return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler, + ARRAY_SIZE(cmd_scaler)); +} + +/* + * nt35950_set_scale_mode - Resolution upscaling mode + * @nt: Main driver structure + * @mode: Scaler mode (MCS_DATA_COMPRESSION_*) + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode) +{ + u8 cmd_scaler[] = { MCS_PARAM_SCALEUP_MODE, mode }; + + return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler, + ARRAY_SIZE(cmd_scaler)); +} + +/* + * nt35950_inject_black_image - Display a completely black image + * @nt: Main driver structure + * + * After IC setup, the attached panel may show random data + * due to driveric behavior changes (resolution, compression, + * scaling, etc). 
This function, called after parameters setup, + * makes the driver ic to output a completely black image to + * the display. + * It makes sense to push a black image before sending the sleep-out + * and display-on commands. + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_inject_black_image(struct nt35950 *nt) +{ + const u8 cmd0_black_img[] = { 0x6f, 0x01 }; + const u8 cmd1_black_img[] = { 0xf3, 0x10 }; + u8 cmd_test[] = { 0xff, 0xaa, 0x55, 0xa5, 0x80 }; + int ret; + + /* Enable test command */ + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test)); + if (ret < 0) + return ret; + + /* Send a black image */ + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd0_black_img, + ARRAY_SIZE(cmd0_black_img)); + if (ret < 0) + return ret; + ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd1_black_img, + ARRAY_SIZE(cmd1_black_img)); + if (ret < 0) + return ret; + + /* Disable test command */ + cmd_test[ARRAY_SIZE(cmd_test) - 1] = 0x00; + return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test)); +} + +/* + * nt35950_set_dispout - Set Display Output register parameters + * @nt: Main driver structure + * + * Return: Number of transferred bytes or negative number on error + */ +static int nt35950_set_dispout(struct nt35950 *nt) +{ + u8 cmd_dispout[] = { MCS_PARAM_DISP_OUTPUT_CTRL, 0x00 }; + const struct nt35950_panel_mode *mode_data = nt->desc->mode_data; + + if (mode_data[nt->cur_mode].is_video_mode) + cmd_dispout[1] |= MCS_DISP_OUT_VIDEO_MODE; + if (mode_data[nt->cur_mode].enable_sram) + cmd_dispout[1] |= MCS_DISP_OUT_SRAM_EN; + + return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_dispout, + ARRAY_SIZE(cmd_dispout)); +} + +static int nt35950_get_current_mode(struct nt35950 *nt) +{ + struct drm_connector *connector = nt->connector; + struct drm_crtc_state *crtc_state; + int i; + + /* Return the default (first) mode if no info available yet */ + if (!connector->state || !connector->state->crtc) + return 0; + + crtc_state = connector->state->crtc->state; + + for (i = 0; i < nt->desc->num_modes; i++) { + if (drm_mode_match(&crtc_state->mode, + &nt->desc->mode_data[i].mode, + DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_CLOCK)) + return i; + } + + return 0; +} + +static int nt35950_on(struct nt35950 *nt) +{ + const struct nt35950_panel_mode *mode_data = nt->desc->mode_data; + struct mipi_dsi_device *dsi = nt->dsi[0]; + struct device *dev = &dsi->dev; + int ret; + + nt->cur_mode = nt35950_get_current_mode(nt); + nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM; + nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; + + ret = nt35950_set_cmd2_page(nt, 0); + if (ret < 0) + return ret; + + ret = nt35950_set_data_compression(nt, mode_data[nt->cur_mode].compression); + if (ret < 0) + return ret; + + ret = nt35950_set_scale_mode(nt, mode_data[nt->cur_mode].scaler_mode); + if (ret < 0) + return ret; + + ret = nt35950_set_scaler(nt, mode_data[nt->cur_mode].scaler_on); + if (ret < 0) + return ret; + + ret = nt35950_set_dispout(nt); + if (ret < 0) + return ret; + + ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (ret < 0) { + dev_err(dev, "Failed to set tear on: %d\n", ret); + return ret; + } + + ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0); + if (ret < 0) { + dev_err(dev, "Failed to set tear scanline: %d\n", ret); + return ret; + } + + /* CMD2 Page 1 */ + ret = nt35950_set_cmd2_page(nt, 1); + if (ret < 0) + return ret; + + /* Unknown command */ + dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88); + + /* CMD2 Page 7 */ + ret = 
nt35950_set_cmd2_page(nt, 7); + if (ret < 0) + return ret; + + /* Enable SubPixel Rendering */ + dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01); + + /* SPR Mode: YYG Rainbow-RGB */ + dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB); + + /* CMD3 */ + ret = nt35950_inject_black_image(nt); + if (ret < 0) + return ret; + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) + return ret; + msleep(120); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) + return ret; + msleep(120); + + nt->dsi[0]->mode_flags &= ~MIPI_DSI_MODE_LPM; + nt->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM; + + return 0; +} + +static int nt35950_off(struct nt35950 *nt) +{ + struct device *dev = &nt->dsi[0]->dev; + int ret; + + ret = mipi_dsi_dcs_set_display_off(nt->dsi[0]); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + goto set_lpm; + } + usleep_range(10000, 11000); + + ret = mipi_dsi_dcs_enter_sleep_mode(nt->dsi[0]); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + goto set_lpm; + } + msleep(150); + +set_lpm: + nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM; + nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev) +{ + int ret; + + nt->vregs[0].supply = "vddio"; + nt->vregs[1].supply = "avdd"; + nt->vregs[2].supply = "avee"; + nt->vregs[3].supply = "dvdd"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->vregs), + nt->vregs); + if (ret < 0) + return ret; + + ret = regulator_is_supported_voltage(nt->vregs[0].consumer, + 1750000, 1950000); + if (!ret) + return -EINVAL; + ret = regulator_is_supported_voltage(nt->vregs[1].consumer, + 5200000, 5900000); + if (!ret) + return -EINVAL; + /* AVEE is negative: -5.90V to -5.20V */ + ret = regulator_is_supported_voltage(nt->vregs[2].consumer, + 5200000, 5900000); + if (!ret) + return -EINVAL; + + ret = regulator_is_supported_voltage(nt->vregs[3].consumer, + 1300000, 1400000); + if (!ret) + return -EINVAL; + + return 0; +} + +static int nt35950_prepare(struct drm_panel *panel) +{ + struct nt35950 *nt = to_nt35950(panel); + struct device *dev = &nt->dsi[0]->dev; + int ret; + + if (nt->prepared) + return 0; + + ret = regulator_enable(nt->vregs[0].consumer); + if (ret) + return ret; + usleep_range(2000, 5000); + + ret = regulator_enable(nt->vregs[3].consumer); + if (ret) + goto end; + usleep_range(15000, 18000); + + ret = regulator_enable(nt->vregs[1].consumer); + if (ret) + goto end; + + ret = regulator_enable(nt->vregs[2].consumer); + if (ret) + goto end; + usleep_range(12000, 13000); + + nt35950_reset(nt); + + ret = nt35950_on(nt); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + goto end; + } + nt->prepared = true; + +end: + if (ret < 0) { + regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs); + return ret; + } + + return 0; +} + +static int nt35950_unprepare(struct drm_panel *panel) +{ + struct nt35950 *nt = to_nt35950(panel); + struct device *dev = &nt->dsi[0]->dev; + int ret; + + if (!nt->prepared) + return 0; + + ret = nt35950_off(nt); + if (ret < 0) + dev_err(dev, "Failed to deinitialize panel: %d\n", ret); + + gpiod_set_value_cansleep(nt->reset_gpio, 0); + regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs); + + nt->prepared = false; + return 0; +} + +static int nt35950_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct nt35950 *nt = to_nt35950(panel); + int i; + + for (i = 0; i < nt->desc->num_modes; i++) { + struct drm_display_mode 
*mode; + + mode = drm_mode_duplicate(connector->dev, + &nt->desc->mode_data[i].mode); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type |= DRM_MODE_TYPE_DRIVER; + if (nt->desc->num_modes == 1) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + } + + connector->display_info.bpc = 8; + connector->display_info.height_mm = nt->desc->mode_data[0].mode.height_mm; + connector->display_info.width_mm = nt->desc->mode_data[0].mode.width_mm; + nt->connector = connector; + + return nt->desc->num_modes; +} + +static const struct drm_panel_funcs nt35950_panel_funcs = { + .prepare = nt35950_prepare, + .unprepare = nt35950_unprepare, + .get_modes = nt35950_get_modes, +}; + +static int nt35950_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct device_node *dsi_r; + struct mipi_dsi_host *dsi_r_host; + struct nt35950 *nt; + const struct mipi_dsi_device_info *info; + int i, num_dsis = 1, ret; + + nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL); + if (!nt) + return -ENOMEM; + + ret = nt35950_sharp_init_vregs(nt, dev); + if (ret) + return dev_err_probe(dev, ret, "Regulator init failure.\n"); + + nt->desc = of_device_get_match_data(dev); + if (!nt->desc) + return -ENODEV; + + nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS); + if (IS_ERR(nt->reset_gpio)) { + return dev_err_probe(dev, PTR_ERR(nt->reset_gpio), + "Failed to get reset gpio\n"); + } + + /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */ + if (nt->desc->is_dual_dsi) { + info = &nt->desc->dsi_info; + dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1); + if (!dsi_r) { + dev_err(dev, "Cannot get secondary DSI node.\n"); + return -ENODEV; + } + dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r); + of_node_put(dsi_r); + if (!dsi_r_host) { + dev_err(dev, "Cannot get secondary DSI host\n"); + return -EPROBE_DEFER; + } + + nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info); + if (!nt->dsi[1]) { + dev_err(dev, "Cannot get secondary DSI node\n"); + return -ENODEV; + } + num_dsis++; + } + + nt->dsi[0] = dsi; + mipi_dsi_set_drvdata(dsi, nt); + + drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&nt->panel); + if (ret) + return dev_err_probe(dev, ret, "Failed to get backlight\n"); + + drm_panel_add(&nt->panel); + + for (i = 0; i < num_dsis; i++) { + nt->dsi[i]->lanes = nt->desc->num_lanes; + nt->dsi[i]->format = MIPI_DSI_FMT_RGB888; + + nt->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_LPM; + + if (nt->desc->mode_data[0].is_video_mode) + nt->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO; + + ret = mipi_dsi_attach(nt->dsi[i]); + if (ret < 0) { + return dev_err_probe(dev, ret, + "Cannot attach to DSI%d host.\n", i); + } + } + + /* Make sure to set RESX LOW before starting the power-on sequence */ + gpiod_set_value_cansleep(nt->reset_gpio, 0); + return 0; +} + +static int nt35950_remove(struct mipi_dsi_device *dsi) +{ + struct nt35950 *nt = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(nt->dsi[0]); + if (ret < 0) + dev_err(&dsi->dev, + "Failed to detach from DSI0 host: %d\n", ret); + + if (nt->dsi[1]) { + ret = mipi_dsi_detach(nt->dsi[1]); + if (ret < 0) + dev_err(&dsi->dev, + "Failed to detach from DSI1 host: %d\n", ret); + mipi_dsi_device_unregister(nt->dsi[1]); + } + + drm_panel_remove(&nt->panel); + + return 0; +} + +static const struct nt35950_panel_mode sharp_ls055d1sx04_modes[] = { + { + /* 1920x1080 60Hz no compression */ + 
.mode = { + .clock = 214537, + .hdisplay = 1080, + .hsync_start = 1080 + 400, + .hsync_end = 1080 + 400 + 40, + .htotal = 1080 + 400 + 40 + 300, + .vdisplay = 1920, + .vsync_start = 1920 + 12, + .vsync_end = 1920 + 12 + 2, + .vtotal = 1920 + 12 + 2 + 10, + .width_mm = 68, + .height_mm = 121, + }, + .compression = MCS_DATA_COMPRESSION_NONE, + .enable_sram = true, + .is_video_mode = false, + .scaler_on = 1, + .scaler_mode = MCS_SCALEUP_DUPLICATE, + }, + /* TODO: Add 2160x3840 60Hz when DSC is supported */ +}; + +static const struct nt35950_panel_desc sharp_ls055d1sx04 = { + .model_name = "Sharp LS055D1SX04", + .dsi_info = { + .type = "LS055D1SX04", + .channel = 0, + .node = NULL, + }, + .mode_data = sharp_ls055d1sx04_modes, + .num_modes = ARRAY_SIZE(sharp_ls055d1sx04_modes), + .is_dual_dsi = true, + .num_lanes = 4, +}; + +static const struct of_device_id nt35950_of_match[] = { + { .compatible = "sharp,ls055d1sx04", .data = &sharp_ls055d1sx04 }, + { } +}; +MODULE_DEVICE_TABLE(of, nt35950_of_match); + +static struct mipi_dsi_driver nt35950_driver = { + .probe = nt35950_probe, + .remove = nt35950_remove, + .driver = { + .name = "panel-novatek-nt35950", + .of_match_table = nt35950_of_match, + }, +}; +module_mipi_dsi_driver(nt35950_driver); + +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>"); +MODULE_DESCRIPTION("Novatek NT35950 DriverIC panels driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c index 533cd3934b8b..231f371901e8 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c @@ -618,7 +618,7 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo) ret = regulator_set_load(pinfo->supplies[i].consumer, nt36672a_regulator_enable_loads[i]); if (ret) - return dev_err_probe(dev, ret, "failed to set regulator enable loads\n"); + return dev_err_probe(dev, ret, "failed to set regulator enable loads\n"); } pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); @@ -656,7 +656,13 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + drm_panel_remove(&pinfo->base); + return err; + } + + return 0; } static int nt36672a_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index f8151fe3ac9a..d036853db865 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -258,16 +258,13 @@ static int nt39016_probe(struct spi_device *spi) return -EINVAL; panel->supply = devm_regulator_get(dev, "power"); - if (IS_ERR(panel->supply)) { - dev_err(dev, "Failed to get power supply\n"); - return PTR_ERR(panel->supply); - } + if (IS_ERR(panel->supply)) + return dev_err_probe(dev, PTR_ERR(panel->supply), + "Failed to get power supply\n"); panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(panel->reset_gpio)) { - dev_err(dev, "Failed to get reset GPIO\n"); - return PTR_ERR(panel->reset_gpio); - } + if (IS_ERR(panel->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(panel->reset_gpio), "Failed to get reset GPIO\n"); spi->bits_per_word = 8; spi->mode = SPI_MODE_3 | SPI_3WIRE; @@ -287,11 +284,8 @@ static int nt39016_probe(struct spi_device *spi) DRM_MODE_CONNECTOR_DPI); err = drm_panel_of_backlight(&panel->drm_panel); - if (err) { - if 
(err != -EPROBE_DEFER) - dev_err(dev, "Failed to get backlight handle\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "Failed to get backlight handle\n"); drm_panel_add(&panel->drm_panel); diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c index 3c20beeb1781..3991f5d950af 100644 --- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c @@ -241,7 +241,13 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi) if (ret < 0) return ret; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + wuxga_nt_panel_del(wuxga_nt); + return ret; + } + + return 0; } static int wuxga_nt_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index a3782830ae3c..1fb579a574d9 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -199,7 +199,13 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; } static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c index ccc8ed6fe3ae..e38262b67ff7 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c @@ -452,27 +452,22 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi) ctx->supplies[1].supply = "vci"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "failed to get regulators: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get regulators\n"); ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset_gpio)) { - dev_err(dev, "cannot get reset-gpio: %ld\n", - PTR_ERR(ctx->reset_gpio)); - return PTR_ERR(ctx->reset_gpio); - } + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "cannot get reset-gpio\n"); drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs, DRM_MODE_CONNECTOR_DSI); ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx, &s6e63j0x03_bl_ops, NULL); - if (IS_ERR(ctx->bl_dev)) { - dev_err(dev, "failed to register backlight device\n"); - return PTR_ERR(ctx->bl_dev); - } + if (IS_ERR(ctx->bl_dev)) + return dev_err_probe(dev, PTR_ERR(ctx->bl_dev), + "failed to register backlight device\n"); ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS; ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS; diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c index ea63799ff2a1..29fde3823212 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c @@ -247,6 +247,7 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi) ret = mipi_dsi_attach(dsi); if (ret < 0) { dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); return ret; } diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c index 8cb1853574bb..1fb37fda4ba9 100644 --- 
a/drivers/gpu/drm/panel/panel-samsung-sofef00.c +++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c @@ -270,18 +270,14 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) } ctx->supply = devm_regulator_get(dev, "vddio"); - if (IS_ERR(ctx->supply)) { - ret = PTR_ERR(ctx->supply); - dev_err(dev, "Failed to get vddio regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->supply)) + return dev_err_probe(dev, PTR_ERR(ctx->supply), + "Failed to get vddio regulator\n"); ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(ctx->reset_gpio)) { - ret = PTR_ERR(ctx->reset_gpio); - dev_warn(dev, "Failed to get reset-gpios: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); ctx->dsi = dsi; mipi_dsi_set_drvdata(dsi, ctx); @@ -302,6 +298,7 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi) ret = mipi_dsi_attach(dsi); if (ret < 0) { dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); return ret; } diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c index 94992f45113a..a07d0f6c3e69 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c @@ -146,22 +146,19 @@ static int ls037v7dw01_probe(struct platform_device *pdev) lcd->pdev = pdev; lcd->vdd = devm_regulator_get(&pdev->dev, "envdd"); - if (IS_ERR(lcd->vdd)) { - dev_err(&pdev->dev, "failed to get regulator\n"); - return PTR_ERR(lcd->vdd); - } + if (IS_ERR(lcd->vdd)) + return dev_err_probe(&pdev->dev, PTR_ERR(lcd->vdd), + "failed to get regulator\n"); lcd->ini_gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(lcd->ini_gpio)) { - dev_err(&pdev->dev, "failed to get enable gpio\n"); - return PTR_ERR(lcd->ini_gpio); - } + if (IS_ERR(lcd->ini_gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(lcd->ini_gpio), + "failed to get enable gpio\n"); lcd->resb_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(lcd->resb_gpio)) { - dev_err(&pdev->dev, "failed to get reset gpio\n"); - return PTR_ERR(lcd->resb_gpio); - } + if (IS_ERR(lcd->resb_gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(lcd->resb_gpio), + "failed to get reset gpio\n"); lcd->mo_gpio = devm_gpiod_get_index(&pdev->dev, "mode", 0, GPIOD_OUT_LOW); diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c index b937e24dac8e..25829a0a8e80 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c @@ -296,7 +296,13 @@ static int sharp_nt_panel_probe(struct mipi_dsi_device *dsi) if (ret < 0) return ret; - return mipi_dsi_attach(dsi); + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + sharp_nt_panel_del(sharp_nt); + return ret; + } + + return 0; } static int sharp_nt_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index eb475a3a774b..9e46db5e359c 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2031,6 +2031,31 @@ static const struct panel_desc innolux_g070y2_l01 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode innolux_g070y2_t02_mode = { + .clock = 33333, + .hdisplay = 800, + .hsync_start = 800 + 210, + .hsync_end = 800 + 210 + 20, + .htotal = 800 + 210 + 20 + 46, + .vdisplay = 480, + 
.vsync_start = 480 + 22, + .vsync_end = 480 + 22 + 10, + .vtotal = 480 + 22 + 23 + 10, +}; + +static const struct panel_desc innolux_g070y2_t02 = { + .modes = &innolux_g070y2_t02_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 152, + .height = 92, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, +}; + static const struct display_timing innolux_g101ice_l01_timing = { .pixelclock = { 60400000, 71100000, 74700000 }, .hactive = { 1280, 1280, 1280 }, @@ -3227,6 +3252,33 @@ static const struct panel_desc starry_kr070pe2t = { .connector_type = DRM_MODE_CONNECTOR_DPI, }; +static const struct display_timing tsd_tst043015cmhx_timing = { + .pixelclock = { 5000000, 9000000, 12000000 }, + .hactive = { 480, 480, 480 }, + .hfront_porch = { 4, 5, 65 }, + .hback_porch = { 36, 40, 255 }, + .hsync_len = { 1, 1, 1 }, + .vactive = { 272, 272, 272 }, + .vfront_porch = { 2, 8, 97 }, + .vback_porch = { 3, 8, 31 }, + .vsync_len = { 1, 1, 1 }, + + .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW | + DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE, +}; + +static const struct panel_desc tsd_tst043015cmhx = { + .timings = &tsd_tst043015cmhx_timing, + .num_timings = 1, + .bpc = 8, + .size = { + .width = 105, + .height = 67, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE, +}; + static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = { .clock = 30000, .hdisplay = 800, @@ -3473,6 +3525,31 @@ static const struct panel_desc urt_umsh_8596md_parallel = { .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; +static const struct drm_display_mode vivax_tpc9150_panel_mode = { + .clock = 60000, + .hdisplay = 1024, + .hsync_start = 1024 + 160, + .hsync_end = 1024 + 160 + 100, + .htotal = 1024 + 160 + 100 + 60, + .vdisplay = 600, + .vsync_start = 600 + 12, + .vsync_end = 600 + 12 + 10, + .vtotal = 600 + 12 + 10 + 13, +}; + +static const struct panel_desc vivax_tpc9150_panel = { + .modes = &vivax_tpc9150_panel_mode, + .num_modes = 1, + .bpc = 6, + .size = { + .width = 200, + .height = 115, + }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode vl050_8048nt_c01_mode = { .clock = 33333, .hdisplay = 800, @@ -3738,6 +3815,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,g070y2-l01", .data = &innolux_g070y2_l01, }, { + .compatible = "innolux,g070y2-t02", + .data = &innolux_g070y2_t02, + }, { .compatible = "innolux,g101ice-l01", .data = &innolux_g101ice_l01 }, { @@ -3876,6 +3956,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "starry,kr070pe2t", .data = &starry_kr070pe2t, }, { + .compatible = "team-source-display,tst043015cmhx", + .data = &tsd_tst043015cmhx, + }, { .compatible = "tfc,s9700rtwv43tr-01b", .data = &tfc_s9700rtwv43tr_01b, }, { @@ -3921,6 +4004,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "urt,umsh-8596md-20t", .data = &urt_umsh_8596md_parallel, }, { + .compatible = "vivax,tpc9150-panel", + .data = &vivax_tpc9150_panel, + }, { .compatible = "vxt,vl050-8048nt-c01", .data = &vl050_8048nt_c01, }, { diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c new file mode 100644 index 000000000000..69f07b15fca4 --- /dev/null +++ 
b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, Linaro Limited + * + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree: + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct truly_nt35521 { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data supplies[2]; + struct gpio_desc *reset_gpio; + struct gpio_desc *blen_gpio; + bool prepared; + bool enabled; +}; + +static inline +struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel) +{ + return container_of(panel, struct truly_nt35521, panel); +} + +#define dsi_generic_write_seq(dsi, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ + } while (0) + +static void truly_nt35521_reset(struct truly_nt35521 *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(150); +} + +static int truly_nt35521_on(struct truly_nt35521 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00); + dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80); + dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00); + dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00); + dsi_generic_write_seq(dsi, 0x6f, 0x01); + dsi_generic_write_seq(dsi, 0xb1, 0x21); + dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01); + dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02); + dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11); + dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb6, 0x02); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01); + dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09); + dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09); + dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00); + dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00); + dsi_generic_write_seq(dsi, 0xca, 0x00); + dsi_generic_write_seq(dsi, 0xc0, 0x04); + dsi_generic_write_seq(dsi, 0xbe, 0xb5); + dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35); + dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25); + dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43); + dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02); + dsi_generic_write_seq(dsi, 0xee, 0x03); + dsi_generic_write_seq(dsi, 0xb0, + 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3, + 0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11); + dsi_generic_write_seq(dsi, 0xb1, + 0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3, + 0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77); + dsi_generic_write_seq(dsi, 0xb2, + 0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c, + 0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90); + dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98); + dsi_generic_write_seq(dsi, 0xb4, + 0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9, + 0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02); + dsi_generic_write_seq(dsi, 0xb5, + 0x01, 0x1f, 0x01, 
0x51, 0x01, 0x7a, 0x01, 0xbf, + 0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76); + dsi_generic_write_seq(dsi, 0xb6, + 0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c, + 0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2); + dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba); + dsi_generic_write_seq(dsi, 0xb8, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a, + 0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9); + dsi_generic_write_seq(dsi, 0xb9, + 0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3, + 0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67); + dsi_generic_write_seq(dsi, 0xba, + 0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31, + 0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4); + dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03); + dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00); + dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00); + dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00); + dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00); + dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00); + dsi_generic_write_seq(dsi, 0xc4, 0x60); + dsi_generic_write_seq(dsi, 0xc5, 0xc0); + dsi_generic_write_seq(dsi, 0xc6, 0x00); + dsi_generic_write_seq(dsi, 0xc7, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05); + dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06); + dsi_generic_write_seq(dsi, 0xb8, 0x00); + dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03); + dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03); + dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03); + dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03); + dsi_generic_write_seq(dsi, 0xc0, 0x0b); + dsi_generic_write_seq(dsi, 0xc1, 0x09); + dsi_generic_write_seq(dsi, 0xc2, 0xa6); + dsi_generic_write_seq(dsi, 0xc3, 0x05); + dsi_generic_write_seq(dsi, 0xc4, 0x00); + dsi_generic_write_seq(dsi, 0xc5, 0x02); + dsi_generic_write_seq(dsi, 0xc6, 0x22); + dsi_generic_write_seq(dsi, 0xc7, 0x03); + dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20); + dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20); + dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60); + dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60); + dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10); + dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10); + dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10); + 
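
The dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, <n>) lines that punctuate this sequence select a manufacturer-command (CMD2) page before the page-specific registers that follow; the nt35950 driver added earlier wraps the identical unlock bytes in nt35950_set_cmd2_page(). If one wanted to de-duplicate the writes here in the same style, a helper might look like this (the function is illustrative, not part of the patch):

    static int nt35521_set_cmd2_page(struct mipi_dsi_device *dsi, u8 page)
    {
        const u8 seq[] = { 0xf0, 0x55, 0xaa, 0x52, 0x08, page };
        int ret;

        ret = mipi_dsi_generic_write(dsi, seq, ARRAY_SIZE(seq));
        return ret < 0 ? ret : 0;
    }

The 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00 writes near the end of the sequence appear to be the matching close, dropping out of CMD2 access before normal DCS commands resume.
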
dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10); + dsi_generic_write_seq(dsi, 0xd0, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd5, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd6, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd7, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xe5, 0x06); + dsi_generic_write_seq(dsi, 0xe6, 0x06); + dsi_generic_write_seq(dsi, 0xe7, 0x00); + dsi_generic_write_seq(dsi, 0xe8, 0x06); + dsi_generic_write_seq(dsi, 0xe9, 0x06); + dsi_generic_write_seq(dsi, 0xea, 0x06); + dsi_generic_write_seq(dsi, 0xeb, 0x00); + dsi_generic_write_seq(dsi, 0xec, 0x00); + dsi_generic_write_seq(dsi, 0xed, 0x30); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06); + dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e); + dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34); + dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a); + dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10); + dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16); + dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31); + dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08); + dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01); + dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19); + dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13); + dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29); + dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31); + dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d); + dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d); + dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34); + dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a); + dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19); + dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13); + dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01); + dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31); + dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08); + dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02); + dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10); + dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16); + dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29); + dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31); + dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e); + dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31); + dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xe7, 0x00); + dsi_generic_write_seq(dsi, 0x6f, 0x02); + dsi_generic_write_seq(dsi, 0xf7, 0x47); + dsi_generic_write_seq(dsi, 0x6f, 0x0a); + dsi_generic_write_seq(dsi, 0xf7, 0x02); + dsi_generic_write_seq(dsi, 0x6f, 0x17); + dsi_generic_write_seq(dsi, 0xf4, 0x60); + dsi_generic_write_seq(dsi, 0x6f, 0x01); + dsi_generic_write_seq(dsi, 0xf9, 0x46); + dsi_generic_write_seq(dsi, 0x6f, 0x11); + 
dsi_generic_write_seq(dsi, 0xf3, 0x01); + dsi_generic_write_seq(dsi, 0x35, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00); + dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00); + dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21); + dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00); + dsi_generic_write_seq(dsi, 0x35, 0x00); + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to exit sleep mode: %d\n", ret); + return ret; + } + msleep(120); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display on: %d\n", ret); + return ret; + } + usleep_range(1000, 2000); + + dsi_generic_write_seq(dsi, 0x53, 0x24); + + return 0; +} + +static int truly_nt35521_off(struct truly_nt35521 *ctx) +{ + struct mipi_dsi_device *dsi = ctx->dsi; + struct device *dev = &dsi->dev; + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) { + dev_err(dev, "Failed to set display off: %d\n", ret); + return ret; + } + msleep(50); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(dev, "Failed to enter sleep mode: %d\n", ret); + return ret; + } + msleep(150); + + return 0; +} + +static int truly_nt35521_prepare(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (ctx->prepared) + return 0; + + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + truly_nt35521_reset(ctx); + + ret = truly_nt35521_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + return ret; + } + + ctx->prepared = true; + return 0; +} + +static int truly_nt35521_unprepare(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + if (!ctx->prepared) + return 0; + + ret = truly_nt35521_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), + ctx->supplies); + + ctx->prepared = false; + return 0; +} + +static int truly_nt35521_enable(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + + if (ctx->enabled) + return 0; + + gpiod_set_value_cansleep(ctx->blen_gpio, 1); + + ctx->enabled = true; + return 0; +} + +static int truly_nt35521_disable(struct drm_panel *panel) +{ + struct truly_nt35521 *ctx = to_truly_nt35521(panel); + + if (!ctx->enabled) + return 0; + + gpiod_set_value_cansleep(ctx->blen_gpio, 0); + + ctx->enabled = false; + return 0; +} + +static const struct drm_display_mode truly_nt35521_mode = { + .clock = (720 + 232 + 20 + 112) * (1280 + 18 + 1 + 18) * 60 / 1000, + .hdisplay = 720, + .hsync_start = 720 + 232, + .hsync_end = 720 + 232 + 20, + .htotal = 720 + 232 + 20 + 112, + .vdisplay = 1280, + .vsync_start = 1280 + 18, + .vsync_end = 1280 + 18 + 1, + .vtotal = 1280 + 18 + 1 + 18, + .width_mm = 65, + .height_mm = 116, +}; + +static int truly_nt35521_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &truly_nt35521_mode); 
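
For reference, the .clock field of struct drm_display_mode is in kHz, and the inline expression in truly_nt35521_mode above computes it directly from the blanking totals at 60 Hz:

    htotal = 720 + 232 + 20 + 112 = 1084
    vtotal = 1280 + 18 + 1 + 18   = 1317
    clock  = 1084 * 1317 * 60 / 1000 = 85657 kHz  (~85.66 MHz pixel clock)

The integer division truncates the fractional kHz; the jdi-fhd-r63452 mode earlier in this section uses the same idiom.
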
+ if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs truly_nt35521_panel_funcs = { + .prepare = truly_nt35521_prepare, + .unprepare = truly_nt35521_unprepare, + .enable = truly_nt35521_enable, + .disable = truly_nt35521_disable, + .get_modes = truly_nt35521_get_modes, +}; + +static int truly_nt35521_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); + if (ret < 0) + return ret; + + return 0; +} + +static int truly_nt35521_bl_get_brightness(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness; + int ret; + + ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); + if (ret < 0) + return ret; + + return brightness & 0xff; +} + +static const struct backlight_ops truly_nt35521_bl_ops = { + .update_status = truly_nt35521_bl_update_status, + .get_brightness = truly_nt35521_bl_get_brightness, +}; + +static struct backlight_device * +truly_nt35521_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 255, + .max_brightness = 255, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &truly_nt35521_bl_ops, &props); +} + +static int truly_nt35521_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct truly_nt35521 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->supplies[0].supply = "positive5"; + ctx->supplies[1].supply = "negative5"; + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), + ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to get regulators: %d\n", ret); + return ret; + } + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->blen_gpio = devm_gpiod_get(dev, "backlight", GPIOD_OUT_LOW); + if (IS_ERR(ctx->blen_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->blen_gpio), + "Failed to get backlight-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET | + MIPI_DSI_CLOCK_NON_CONTINUOUS; + + drm_panel_init(&ctx->panel, dev, &truly_nt35521_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + ctx->panel.backlight = truly_nt35521_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "Failed to attach to DSI host: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static int truly_nt35521_remove(struct mipi_dsi_device *dsi) +{ + struct truly_nt35521 *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + 
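
The probe above, like the sofef00, s6e63j0x03, nt39016, ls037v7dw01, td043mtea1 and xpp055c272 hunks elsewhere in this section, converts open-coded error reporting to dev_err_probe(). The helper logs and returns the error in one expression, and it treats -EPROBE_DEFER specially: a deferred probe is recorded as the defer reason (visible in the devices_deferred debugfs file) rather than printed at error level, which is why the conversions can delete the explicit 'if (err != -EPROBE_DEFER)' guards. The shape of the transformation, on the reset-GPIO lookup most of these drivers share:

    /* before */
    ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
    if (IS_ERR(ctx->reset_gpio)) {
        dev_err(dev, "Failed to get reset-gpios\n");
        return PTR_ERR(ctx->reset_gpio);
    }

    /* after */
    ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
    if (IS_ERR(ctx->reset_gpio))
        return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
                             "Failed to get reset-gpios\n");
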
drm_panel_remove(&ctx->panel); + + return 0; +} + +static const struct of_device_id truly_nt35521_of_match[] = { + { .compatible = "sony,tulip-truly-nt35521" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, truly_nt35521_of_match); + +static struct mipi_dsi_driver truly_nt35521_driver = { + .probe = truly_nt35521_probe, + .remove = truly_nt35521_remove, + .driver = { + .name = "panel-truly-nt35521", + .of_match_table = truly_nt35521_of_match, + }, +}; +module_mipi_dsi_driver(truly_nt35521_driver); + +MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); +MODULE_DESCRIPTION("DRM driver for Sony Tulip Truly NT35521 panel"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index bacaf1b7fb70..1866cdb8f9c1 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -431,16 +431,14 @@ static int td043mtea1_probe(struct spi_device *spi) memcpy(lcd->gamma, td043mtea1_def_gamma, sizeof(lcd->gamma)); lcd->vcc_reg = devm_regulator_get(&spi->dev, "vcc"); - if (IS_ERR(lcd->vcc_reg)) { - dev_err(&spi->dev, "failed to get VCC regulator\n"); - return PTR_ERR(lcd->vcc_reg); - } + if (IS_ERR(lcd->vcc_reg)) + return dev_err_probe(&spi->dev, PTR_ERR(lcd->vcc_reg), + "failed to get VCC regulator\n"); lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(lcd->reset_gpio)) { - dev_err(&spi->dev, "failed to get reset GPIO\n"); - return PTR_ERR(lcd->reset_gpio); - } + if (IS_ERR(lcd->reset_gpio)) + return dev_err_probe(&spi->dev, PTR_ERR(lcd->reset_gpio), + "failed to get reset GPIO\n"); spi->bits_per_word = 16; spi->mode = SPI_MODE_0; diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c index d17aae8b71d7..8177f5a360fb 100644 --- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c +++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c @@ -283,26 +283,19 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi) return -ENOMEM; ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset_gpio)) { - dev_err(dev, "cannot get reset gpio\n"); - return PTR_ERR(ctx->reset_gpio); - } + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "cannot get reset gpio\n"); ctx->vci = devm_regulator_get(dev, "vci"); - if (IS_ERR(ctx->vci)) { - ret = PTR_ERR(ctx->vci); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to request vci regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->vci)) + return dev_err_probe(dev, PTR_ERR(ctx->vci), + "Failed to request vci regulator\n"); ctx->iovcc = devm_regulator_get(dev, "iovcc"); - if (IS_ERR(ctx->iovcc)) { - ret = PTR_ERR(ctx->iovcc); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to request iovcc regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->iovcc)) + return dev_err_probe(dev, PTR_ERR(ctx->iovcc), + "Failed to request iovcc regulator\n"); mipi_dsi_set_drvdata(dsi, ctx); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 82ad9a67f251..96bb5a465627 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -427,7 +427,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, } } - args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); + args->retained = drm_gem_shmem_madvise(&bo->base, args->madv); if (args->retained) { if (args->madv == PANFROST_MADV_DONTNEED) 
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 23377481f4e3..ead65f5fa2bc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -49,7 +49,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) kvfree(bo->sgts); } - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&bo->base); } struct panfrost_gem_mapping * @@ -187,23 +187,25 @@ void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) static int panfrost_gem_pin(struct drm_gem_object *obj) { - if (to_panfrost_bo(obj)->is_heap) + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + + if (bo->is_heap) return -EINVAL; - return drm_gem_shmem_pin(obj); + return drm_gem_shmem_pin(&bo->base); } static const struct drm_gem_object_funcs panfrost_gem_funcs = { .free = panfrost_gem_free_object, .open = panfrost_gem_open, .close = panfrost_gem_close, - .print_info = drm_gem_shmem_print_info, + .print_info = drm_gem_shmem_object_print_info, .pin = panfrost_gem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; /** @@ -221,7 +223,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) - return NULL; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&obj->mappings.list); mutex_init(&obj->mappings.lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 1b9f68d8e9aa..b0142341e223 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -52,7 +52,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) goto unlock_mappings; panfrost_gem_teardown_mappings_locked(bo); - drm_gem_shmem_purge_locked(obj); + drm_gem_shmem_purge_locked(&bo->base); ret = true; mutex_unlock(&shmem->pages_lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index f51d3f791a17..39562f2d11a4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -304,7 +304,8 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) { struct panfrost_gem_object *bo = mapping->obj; - struct drm_gem_object *obj = &bo->base.base; + struct drm_gem_shmem_object *shmem = &bo->base; + struct drm_gem_object *obj = &shmem->base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE; @@ -315,7 +316,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) if (bo->noexec) prot |= IOMMU_NOEXEC; - sgt = drm_gem_shmem_get_pages_sgt(obj); + sgt = drm_gem_shmem_get_pages_sgt(shmem); if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index e116a4d9b8e5..1d36df5af98d 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -105,7 +105,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, goto err_close_bo; } - ret = 
drm_gem_shmem_vmap(&bo->base, &map); + ret = drm_gem_shmem_vmap(bo, &map); if (ret) goto err_put_mapping; perfcnt->buf = map.vaddr; @@ -164,7 +164,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, return 0; err_vunmap: - drm_gem_shmem_vunmap(&bo->base, &map); + drm_gem_shmem_vunmap(bo, &map); err_put_mapping: panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: @@ -194,7 +194,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, &map); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map); perfcnt->buf = NULL; panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); diff --git a/drivers/gpu/drm/pl111/Kconfig b/drivers/gpu/drm/pl111/Kconfig index 3aae387a96af..91ee05b01303 100644 --- a/drivers/gpu/drm/pl111/Kconfig +++ b/drivers/gpu/drm/pl111/Kconfig @@ -6,7 +6,6 @@ config DRM_PL111 depends on VEXPRESS_CONFIG || VEXPRESS_CONFIG=n depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_BRIDGE select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 1f9a59601bb1..6a36b0fd845c 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -57,13 +57,16 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) struct qxl_bo *bo; list_for_each_entry(bo, &qdev->gem.objects, list) { - struct dma_resv_list *fobj; - int rel; - - rcu_read_lock(); - fobj = dma_resv_shared_list(bo->tbo.base.resv); - rel = fobj ? fobj->shared_count : 0; - rcu_read_unlock(); + struct dma_resv_iter cursor; + struct dma_fence *fence; + int rel = 0; + + dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + if (dma_resv_iter_is_restarted(&cursor)) + rel = 0; + ++rel; + } seq_printf(m, "size %ld, pc %d, num releases %d\n", (unsigned long)bo->tbo.base.size, diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index fc47b0deb021..e4b16421500b 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -29,7 +29,6 @@ #include "qxl_drv.h" -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vgaarb.h> @@ -295,7 +294,7 @@ static struct drm_driver qxl_driver = { static int __init qxl_init(void) { - if (vgacon_text_force() && qxl_modeset == -1) + if (drm_firmware_drivers_only() && qxl_modeset == -1) return -EINVAL; if (qxl_modeset == 0) diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c index 26001c2de9e9..dde0501aea68 100644 --- a/drivers/gpu/drm/r128/ati_pcigart.c +++ b/drivers/gpu/drm/r128/ati_pcigart.c @@ -215,7 +215,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga } ret = 0; -#if defined(__i386__) || defined(__x86_64__) +#ifdef CONFIG_X86 wbinvd(); #else mb(); diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 83e8b8547f9b..bd5dc09e860f 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -5983,7 +5983,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F) #define 
GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F) #else // not __cplusplus -#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT)) +#define GetIndexIntoMasterTable(MasterOrData, FieldName) (offsetof(ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES, FieldName)/sizeof(USHORT)) #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F) #define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index f0cfb58da467..ac006bed4743 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -390,8 +390,7 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev) { struct ci_power_info *pi = ci_get_pi(rdev); - u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; - u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; + u16 hi_sidd, lo_sidd; struct radeon_cac_tdp_table *cac_tdp_table = rdev->pm.dpm.dyn_state.cac_tdp_table; diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 35b77c944701..9d2bcb9551e6 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -820,12 +820,12 @@ union fan_info { static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table, ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) { - u32 size = atom_table->ucNumEntries * - sizeof(struct radeon_clock_voltage_dependency_entry); int i; ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; - radeon_table->entries = kzalloc(size, GFP_KERNEL); + radeon_table->entries = kcalloc(atom_table->ucNumEntries, + sizeof(struct radeon_clock_voltage_dependency_entry), + GFP_KERNEL); if (!radeon_table->entries) return -ENOMEM; @@ -1361,7 +1361,9 @@ u16 r600_get_pcie_lane_support(struct radeon_device *rdev, u8 r600_encode_pci_lane_width(u32 lanes) { - u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 }; + static const u8 encoded_lanes[] = { + 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 + }; if (lanes > 16) return 0; diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index ec867fa880a4..751c2c075e09 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -423,7 +423,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode) drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr, radeon_connector->port, mst_enc->pbn, slots); - drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); + drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1); radeon_dp_mst_set_be_cntl(primary, mst_enc, radeon_connector->mst_port->hpd.hpd, true); @@ -452,7 +452,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode) return; drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port); - drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); + drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1); 
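	/*
	 * Aside on the atombios.h hunk above: the old GetIndexIntoMasterTable()
	 * derived a field offset by casting a null pointer, which is undefined
	 * behaviour in C; offsetof() from <stddef.h> expresses the same
	 * computation portably. A standalone illustration with made-up table
	 * and field names (not from atombios.h):
	 */
	#include <stddef.h>
	#include <stdio.h>

	typedef unsigned short USHORT;

	typedef struct {
		USHORT FirmwareInfo;	/* slot 0 */
		USHORT PowerPlayInfo;	/* slot 1 */
	} EXAMPLE_MASTER_LIST;

	#define EXAMPLE_INDEX(field) \
		(offsetof(EXAMPLE_MASTER_LIST, field) / sizeof(USHORT))

	int main(void)
	{
		/* Prints "0 1": the index of each USHORT slot in the list. */
		printf("%zu %zu\n", EXAMPLE_INDEX(FirmwareInfo),
		       EXAMPLE_INDEX(PowerPlayInfo));
		return 0;
	}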
drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr); /* and this can also fail */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index b74cebca1f89..956c72b5aa33 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -31,7 +31,6 @@ #include <linux/compat.h> -#include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/vga_switcheroo.h> @@ -637,15 +636,11 @@ static struct pci_driver radeon_kms_pci_driver = { static int __init radeon_module_init(void) { - if (vgacon_text_force() && radeon_modeset == -1) { - DRM_INFO("VGACON disable radeon kernel modesetting.\n"); + if (drm_firmware_drivers_only() && radeon_modeset == -1) radeon_modeset = 0; - } - if (radeon_modeset == 0) { - DRM_ERROR("No UMS support in radeon module!\n"); + if (radeon_modeset == 0) return -EINVAL; - } DRM_INFO("radeon kernel modesetting enabled.\n"); radeon_register_atpx_handler(); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0473583dcdac..7afe28408085 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) #endif if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) - rdev->agp = radeon_agp_head_init(rdev->ddev); + rdev->agp = radeon_agp_head_init(dev); if (rdev->agp) { rdev->agp->agp_mtrr = arch_phys_wc_add( rdev->agp->agp_info.aper_base, @@ -168,7 +168,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if (!r) { acpi_status = radeon_acpi_init(rdev); if (acpi_status) - dev_dbg(dev->dev, "Error during ACPI methods call\n"); + dev_dbg(dev->dev, "Error during ACPI methods call\n"); } if (radeon_is_px(dev)) { diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c index 9257b60144c4..b991ba1bcd51 100644 --- a/drivers/gpu/drm/radeon/radeon_sync.c +++ b/drivers/gpu/drm/radeon/radeon_sync.c @@ -91,33 +91,17 @@ int radeon_sync_resv(struct radeon_device *rdev, struct dma_resv *resv, bool shared) { - struct dma_resv_list *flist; - struct dma_fence *f; + struct dma_resv_iter cursor; struct radeon_fence *fence; - unsigned i; + struct dma_fence *f; int r = 0; - /* always sync to the exclusive fence */ - f = dma_resv_excl_fence(resv); - fence = f ? 
to_radeon_fence(f) : NULL; - if (fence && fence->rdev == rdev) - radeon_sync_fence(sync, fence); - else if (f) - r = dma_fence_wait(f, true); - - flist = dma_resv_shared_list(resv); - if (shared || !flist || r) - return r; - - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(resv)); + dma_resv_for_each_fence(&cursor, resv, shared, f) { fence = to_radeon_fence(f); if (fence && fence->rdev == rdev) radeon_sync_fence(sync, fence); else r = dma_fence_wait(f, true); - if (r) break; } diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 2ea86919d953..377f9cdb5b53 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -469,7 +469,6 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, { int32_t *msg, msg_type, handle; unsigned img_size = 0; - struct dma_fence *f; void *ptr; int i, r; @@ -479,13 +478,11 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - f = dma_resv_excl_fence(bo->tbo.base.resv); - if (f) { - r = radeon_fence_wait((struct radeon_fence *)f, false); - if (r) { - DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); - return r; - } + r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, + MAX_SCHEDULE_TIMEOUT); + if (r <= 0) { + DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); + return r ? r : -ETIME; } r = radeon_bo_kmap(bo, &ptr); diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index b47e74421e34..f6e6a6d5d987 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -4,23 +4,24 @@ config DRM_RCAR_DU depends on DRM && OF depends on ARM || ARM64 depends on ARCH_RENESAS || COMPILE_TEST - imply DRM_RCAR_CMM - imply DRM_RCAR_LVDS select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS help Choose this option if you have an R-Car chipset. If M is selected the module will be called rcar-du-drm. -config DRM_RCAR_CMM - tristate "R-Car DU Color Management Module (CMM) Support" - depends on DRM && OF +config DRM_RCAR_USE_CMM + bool "R-Car DU Color Management Module (CMM) Support" depends on DRM_RCAR_DU + default DRM_RCAR_DU help Enable support for R-Car Color Management Module (CMM). +config DRM_RCAR_CMM + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_CMM + config DRM_RCAR_DW_HDMI tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support" depends on DRM && OF @@ -28,15 +29,27 @@ config DRM_RCAR_DW_HDMI help Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder. +config DRM_RCAR_USE_LVDS + bool "R-Car DU LVDS Encoder Support" + depends on DRM_BRIDGE && OF + default DRM_RCAR_DU + help + Enable support for the R-Car Display Unit embedded LVDS encoders. + config DRM_RCAR_LVDS - tristate "R-Car DU LVDS Encoder Support" - depends on DRM && DRM_BRIDGE && OF + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_LVDS select DRM_KMS_HELPER select DRM_PANEL select OF_FLATTREE select OF_OVERLAY + +config DRM_RCAR_MIPI_DSI + tristate "R-Car DU MIPI DSI Encoder Support" + depends on DRM && DRM_BRIDGE && OF + select DRM_MIPI_DSI help - Enable support for the R-Car Display Unit embedded LVDS encoders. + Enable support for the R-Car Display Unit embedded MIPI DSI encoders. 
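Before the R-Car Kconfig changes above, this part of the series also converted qxl_debugfs.c, radeon_sync.c and radeon_uvd.c from open-coded walks over the exclusive fence plus the shared list to the new dma_resv iterators. A minimal sketch of the unlocked variant's contract, under the 5.16-era API used in these hunks (the function name is illustrative):

	#include <linux/dma-fence.h>
	#include <linux/dma-resv.h>

	/* Count all fences on a reservation object without holding its lock. */
	static unsigned int example_count_fences(struct dma_resv *resv)
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;
		unsigned int count = 0;

		dma_resv_iter_begin(&cursor, resv, true /* all fences */);
		dma_resv_for_each_fence_unlocked(&cursor, fence) {
			/*
			 * The walk can race with concurrent updates; when the
			 * iterator restarts, state gathered so far is stale
			 * and must be discarded, as the qxl hunk does with
			 * its "rel" counter.
			 */
			if (dma_resv_iter_is_restarted(&cursor))
				count = 0;
			count++;
		}
		dma_resv_iter_end(&cursor);

		return count;
	}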
config DRM_RCAR_VSP bool "R-Car DU VSP Compositor Support" if ARM diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 4d1187ccc3e5..286bc81b3e7c 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o +obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o # 'remote-endpoint' is fixed up at run-time DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index ea7e39d03545..f361a604337f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -215,6 +215,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; struct rcar_du_device *rcdu = rcrtc->dev; unsigned long mode_clock = mode->clock * 1000; + unsigned int hdse_offset; u32 dsmr; u32 escr; @@ -261,12 +262,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr); escr = ESCR_DCLKSEL_DCLKIN | div; - } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) { + } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) || + rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) { /* - * Use the LVDS PLL output as the dot clock when outputting to - * the LVDS encoder on an SoC that supports this clock routing - * option. We use the clock directly in that case, without any - * additional divider. + * Use the external LVDS or DSI PLL output as the dot clock when + * outputting to the LVDS or DSI encoder on an SoC that supports + * this clock routing option. We use the clock directly in that + * case, without any additional divider. */ escr = ESCR_DCLKSEL_DCLKIN; } else { @@ -298,10 +300,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) | DSMR_DIPM_DISP | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, dsmr); + hdse_offset = 19; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + hdse_offset += 25; + /* Display timings */ - rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19); + rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - + hdse_offset); rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start + - mode->hdisplay - 19); + mode->hdisplay - hdse_offset); rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end - mode->hsync_start - 1); rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1); @@ -836,6 +843,7 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_device *rcdu = rcrtc->dev; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + unsigned int min_sync_porch; unsigned int vbp; if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) @@ -843,9 +851,14 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, /* * The hardware requires a minimum combined horizontal sync and back - * porch of 20 pixels and a minimum vertical back porch of 3 lines. + * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is + * used), and a minimum vertical back porch of 3 lines. 
*/ - if (mode->htotal - mode->hsync_start < 20) + min_sync_porch = 20; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + min_sync_porch += 25; + + if (mode->htotal - mode->hsync_start < min_sync_porch) return MODE_HBLANK_NARROW; vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1); @@ -1206,7 +1219,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, int ret; /* Get the CRTC clock and the optional external clock. */ - if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) { + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_CLOCK)) { sprintf(clk_name, "du.%u", hwindex); name = clk_name; } else { @@ -1243,7 +1256,10 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, rcrtc->group = rgrp; rcrtc->mmio_offset = mmio_offsets[hwindex]; rcrtc->index = hwindex; - rcrtc->dsysr = (rcrtc->index % 2 ? 0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC; + rcrtc->dsysr = rcrtc->index % 2 ? 0 : DSYSR_DRES; + + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_TVM_SYNC)) + rcrtc->dsysr |= DSYSR_TVM_TVSYNC; if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane; @@ -1269,7 +1285,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, drm_crtc_helper_add(crtc, &crtc_helper_funcs); /* Register the interrupt handler. */ - if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) { + if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ)) { /* The IRQ's are associated with the CRTC (sw)index. */ irq = platform_get_irq(pdev, swindex); irqflags = 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index 5f2940c42225..66e8839db708 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -93,17 +93,6 @@ struct rcar_du_crtc_state { #define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state) -enum rcar_du_output { - RCAR_DU_OUTPUT_DPAD0, - RCAR_DU_OUTPUT_DPAD1, - RCAR_DU_OUTPUT_LVDS0, - RCAR_DU_OUTPUT_LVDS1, - RCAR_DU_OUTPUT_HDMI0, - RCAR_DU_OUTPUT_HDMI1, - RCAR_DU_OUTPUT_TCON, - RCAR_DU_OUTPUT_MAX, -}; - int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, unsigned int hwindex); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 4ac26d08ebb4..5a8131ef81d5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -8,6 +8,7 @@ */ #include <linux/clk.h> +#include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/module.h> @@ -36,7 +37,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = { .gen = 2, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), @@ -58,7 +60,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = { static const struct rcar_du_device_info rzg1_du_r8a7745_info = { .gen = 2, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), @@ -79,7 +82,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = { static const struct rcar_du_device_info rzg1_du_r8a77470_info = { .gen = 2, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_INTERLACED | 
RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), @@ -105,7 +109,8 @@ static const struct rcar_du_device_info rzg1_du_r8a77470_info = { static const struct rcar_du_device_info rcar_du_r8a774a1_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, @@ -134,7 +139,8 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = { static const struct rcar_du_device_info rcar_du_r8a774b1_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, @@ -163,7 +169,8 @@ static const struct rcar_du_device_info rcar_du_r8a774b1_info = { static const struct rcar_du_device_info rcar_du_r8a774c0_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE, .channels_mask = BIT(1) | BIT(0), .routes = { @@ -189,7 +196,8 @@ static const struct rcar_du_device_info rcar_du_r8a774c0_info = { static const struct rcar_du_device_info rcar_du_r8a774e1_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, @@ -239,7 +247,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { static const struct rcar_du_device_info rcar_du_r8a7790_info = { .gen = 2, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, .quirks = RCAR_DU_QUIRK_ALIGN_128B, @@ -269,7 +278,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { /* M2-W (r8a7791) and M2-N (r8a7793) are identical */ static const struct rcar_du_device_info rcar_du_r8a7791_info = { .gen = 2, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), @@ -292,7 +302,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = { static const struct rcar_du_device_info rcar_du_r8a7792_info = { .gen = 2, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), @@ -311,7 +322,8 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = { static const struct rcar_du_device_info rcar_du_r8a7794_info = { .gen = 2, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, .channels_mask = BIT(1) | BIT(0), @@ -333,7 +345,8 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = { static const struct rcar_du_device_info rcar_du_r8a7795_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, @@ -366,7 +379,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = { static const struct rcar_du_device_info rcar_du_r8a7796_info = { .gen = 3, - 
.features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, @@ -395,7 +409,8 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = { static const struct rcar_du_device_info rcar_du_r8a77965_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, @@ -424,7 +439,8 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = { static const struct rcar_du_device_info rcar_du_r8a77970_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE | RCAR_DU_FEATURE_INTERLACED | RCAR_DU_FEATURE_TVM_SYNC, @@ -448,7 +464,8 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = { static const struct rcar_du_device_info rcar_du_r8a7799x_info = { .gen = 3, - .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_CRTC_CLOCK | RCAR_DU_FEATURE_VSP1_SOURCE, .channels_mask = BIT(1) | BIT(0), .routes = { @@ -473,6 +490,25 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = { .lvds_clk_mask = BIT(1) | BIT(0), }; +static const struct rcar_du_device_info rcar_du_r8a779a0_info = { + .gen = 3, + .features = RCAR_DU_FEATURE_CRTC_IRQ + | RCAR_DU_FEATURE_VSP1_SOURCE, + .channels_mask = BIT(1) | BIT(0), + .routes = { + /* R8A779A0 has two MIPI DSI outputs. */ + [RCAR_DU_OUTPUT_DSI0] = { + .possible_crtcs = BIT(0), + .port = 0, + }, + [RCAR_DU_OUTPUT_DSI1] = { + .possible_crtcs = BIT(1), + .port = 1, + }, + }, + .dsi_clk_mask = BIT(1) | BIT(0), +}; + static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info }, { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info }, @@ -497,11 +533,32 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info }, { .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info }, { .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info }, + { .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info }, { } }; MODULE_DEVICE_TABLE(of, rcar_du_of_table); +const char *rcar_du_output_name(enum rcar_du_output output) +{ + static const char * const names[] = { + [RCAR_DU_OUTPUT_DPAD0] = "DPAD0", + [RCAR_DU_OUTPUT_DPAD1] = "DPAD1", + [RCAR_DU_OUTPUT_DSI0] = "DSI0", + [RCAR_DU_OUTPUT_DSI1] = "DSI1", + [RCAR_DU_OUTPUT_HDMI0] = "HDMI0", + [RCAR_DU_OUTPUT_HDMI1] = "HDMI1", + [RCAR_DU_OUTPUT_LVDS0] = "LVDS0", + [RCAR_DU_OUTPUT_LVDS1] = "LVDS1", + [RCAR_DU_OUTPUT_TCON] = "TCON", + }; + + if (output >= ARRAY_SIZE(names) || !names[output]) + return "UNKNOWN"; + + return names[output]; +} + /* ----------------------------------------------------------------------------- * DRM operations */ @@ -510,7 +567,11 @@ DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops); static const struct drm_driver rcar_du_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(rcar_du_dumb_create), + .dumb_create = rcar_du_dumb_create, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table, + 
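	/*
	 * Note: the DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() macro is
	 * expanded by hand here so that its default
	 * drm_gem_cma_prime_import_sg_table() hook can be replaced with the
	 * DU-specific import helper added later in this patch.
	 */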
.gem_prime_mmap = drm_gem_prime_mmap, .fops = &rcar_du_fops, .name = "rcar-du", .desc = "Renesas R-Car Display Unit", @@ -570,7 +631,7 @@ static void rcar_du_shutdown(struct platform_device *pdev) static int rcar_du_probe(struct platform_device *pdev) { struct rcar_du_device *rcdu; - struct resource *mem; + unsigned int mask; int ret; /* Allocate and initialize the R-Car device structure. */ @@ -585,11 +646,20 @@ static int rcar_du_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rcdu); /* I/O resources */ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem); + rcdu->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rcdu->mmio)) return PTR_ERR(rcdu->mmio); + /* + * Set the DMA coherent mask to reflect the DU 32-bit DMA address space + * limitations. When sourcing frames from a VSP the DU doesn't perform + * any memory access so set the mask to 40 bits to accept all buffers. + */ + mask = rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE) ? 40 : 32; + ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(mask)); + if (ret) + return ret; + /* DRM/KMS objects */ ret = rcar_du_modeset_init(rcdu); if (ret < 0) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 02ca2d0e1b55..101f42df86ea 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -26,13 +26,27 @@ struct drm_bridge; struct drm_property; struct rcar_du_device; -#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ -#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */ -#define RCAR_DU_FEATURE_INTERLACED BIT(2) /* HW supports interlaced */ -#define RCAR_DU_FEATURE_TVM_SYNC BIT(3) /* Has TV switch/sync modes */ +#define RCAR_DU_FEATURE_CRTC_IRQ BIT(0) /* Per-CRTC IRQ */ +#define RCAR_DU_FEATURE_CRTC_CLOCK BIT(1) /* Per-CRTC clock */ +#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */ +#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */ +#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */ #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */ +enum rcar_du_output { + RCAR_DU_OUTPUT_DPAD0, + RCAR_DU_OUTPUT_DPAD1, + RCAR_DU_OUTPUT_DSI0, + RCAR_DU_OUTPUT_DSI1, + RCAR_DU_OUTPUT_HDMI0, + RCAR_DU_OUTPUT_HDMI1, + RCAR_DU_OUTPUT_LVDS0, + RCAR_DU_OUTPUT_LVDS1, + RCAR_DU_OUTPUT_TCON, + RCAR_DU_OUTPUT_MAX, +}; + /* * struct rcar_du_output_routing - Output routing specification * @possible_crtcs: bitmask of possible CRTCs for the output @@ -56,6 +70,7 @@ struct rcar_du_output_routing { * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*) * @num_lvds: number of internal LVDS encoders * @dpll_mask: bit mask of DU channels equipped with a DPLL + * @dsi_clk_mask: bitmask of channels that can use the DSI clock as dot clock * @lvds_clk_mask: bitmask of channels that can use the LVDS clock as dot clock */ struct rcar_du_device_info { @@ -66,6 +81,7 @@ struct rcar_du_device_info { struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; unsigned int num_lvds; unsigned int dpll_mask; + unsigned int dsi_clk_mask; unsigned int lvds_clk_mask; }; @@ -126,4 +142,6 @@ static inline void rcar_du_write(struct rcar_du_device *rcdu, u32 reg, u32 data) iowrite32(data, rcdu->mmio + reg); } +const char *rcar_du_output_name(enum rcar_du_output output); + #endif /* __RCAR_DU_DRV_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c 
b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 0daa8bba50f5..3977aaa1ab5a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -86,17 +86,25 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, } /* - * Create and initialize the encoder. On Gen3 skip the LVDS1 output if + * Create and initialize the encoder. On Gen3, skip the LVDS1 output if * the LVDS1 encoder is used as a companion for LVDS0 in dual-link - * mode. + * mode, or any LVDS output if it isn't connected. The latter may happen + * on D3 or E3 as the LVDS encoders are needed to provide the pixel + * clock to the DU, even when the LVDS outputs are not used. */ - if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) { - if (rcar_lvds_dual_link(bridge)) + if (rcdu->info->gen >= 3) { + if (output == RCAR_DU_OUTPUT_LVDS1 && + rcar_lvds_dual_link(bridge)) + return -ENOLINK; + + if ((output == RCAR_DU_OUTPUT_LVDS0 || + output == RCAR_DU_OUTPUT_LVDS1) && + !rcar_lvds_is_connected(bridge)) return -ENOLINK; } - dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n", - enc_node, output); + dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n", + enc_node, rcar_du_output_name(output)); renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base, &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE, @@ -110,8 +118,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, ret = drm_bridge_attach(&renc->base, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) { - dev_err(rcdu->dev, "failed to attach bridge for output %u\n", - output); + dev_err(rcdu->dev, + "failed to attach bridge %pOF for output %s (%d)\n", + bridge->of_node, rcar_du_output_name(output), ret); return ret; } @@ -119,7 +128,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base); if (IS_ERR(connector)) { dev_err(rcdu->dev, - "failed to created connector for output %u\n", output); + "failed to create connector for output %s (%ld)\n", + rcar_du_output_name(output), PTR_ERR(connector)); return PTR_ERR(connector); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c index 88a783ceb3e9..8665a1dd2186 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_group.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c @@ -122,10 +122,12 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp) didsr = DIDSR_CODE; for (i = 0; i < num_crtcs; ++i, ++rcrtc) { if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) - didsr |= DIDSR_LCDS_LVDS0(i) + didsr |= DIDSR_LDCS_LVDS0(i) | DIDSR_PDCS_CLK(i, 0); + else if (rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) + didsr |= DIDSR_LDCS_DSI(i); else - didsr |= DIDSR_LCDS_DCLKIN(i) + didsr |= DIDSR_LDCS_DCLKIN(i) | DIDSR_PDCS_CLK(i, 0); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index fdb8a0d127ad..190dbb7f15dd 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -19,6 +19,7 @@ #include <drm/drm_vblank.h> #include <linux/device.h> +#include <linux/dma-buf.h> #include <linux/of_graph.h> #include <linux/of_platform.h> #include <linux/wait.h> @@ -325,6 +326,51 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc) * Frame buffer */ +static const struct drm_gem_object_funcs rcar_du_gem_funcs = { + .free = drm_gem_cma_object_free, + .print_info = drm_gem_cma_object_print_info, + .get_sg_table = drm_gem_cma_object_get_sg_table, + .vmap = drm_gem_cma_object_vmap, + 
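	/*
	 * Stock CMA helpers: only the import path below is DU-specific; the
	 * rest of the GEM object behaviour is unchanged.
	 */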
.mmap = drm_gem_cma_object_mmap, + .vm_ops = &drm_gem_cma_vm_ops, +}; + +struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct rcar_du_device *rcdu = to_rcar_du_device(dev); + struct drm_gem_cma_object *cma_obj; + struct drm_gem_object *gem_obj; + int ret; + + if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) + return drm_gem_cma_prime_import_sg_table(dev, attach, sgt); + + /* Create a CMA GEM buffer. */ + cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); + if (!cma_obj) + return ERR_PTR(-ENOMEM); + + gem_obj = &cma_obj->base; + gem_obj->funcs = &rcar_du_gem_funcs; + + drm_gem_private_object_init(dev, gem_obj, attach->dmabuf->size); + cma_obj->map_noncoherent = false; + + ret = drm_gem_create_mmap_offset(gem_obj); + if (ret) { + drm_gem_object_release(gem_obj); + kfree(cma_obj); + return ERR_PTR(ret); + } + + cma_obj->paddr = 0; + cma_obj->sgt = sgt; + + return gem_obj; +} + int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { @@ -513,8 +559,8 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, ret = rcar_du_encoder_init(rcdu, output, entity); if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK) dev_warn(rcdu->dev, - "failed to initialize encoder %pOF on output %u (%d), skipping\n", - entity, output, ret); + "failed to initialize encoder %pOF on output %s (%d), skipping\n", + entity, rcar_du_output_name(output), ret); of_node_put(entity); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h index 8f5fff176754..789154e19535 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h @@ -12,10 +12,13 @@ #include <linux/types.h> +struct dma_buf_attachment; struct drm_file; struct drm_device; +struct drm_gem_object; struct drm_mode_create_dumb; struct rcar_du_device; +struct sg_table; struct rcar_du_format_info { u32 fourcc; @@ -34,4 +37,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu); int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); +struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + #endif /* __RCAR_DU_KMS_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index fb9964949368..1cdaa51eb9ac 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -257,10 +257,11 @@ #define DIDSR 0x20028 #define DIDSR_CODE (0x7790 << 16) -#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2)) -#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2)) -#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2)) -#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2)) +#define DIDSR_LDCS_DCLKIN(n) (0 << (8 + (n) * 2)) +#define DIDSR_LDCS_DSI(n) (2 << (8 + (n) * 2)) /* V3U only */ +#define DIDSR_LDCS_LVDS0(n) (2 << (8 + (n) * 2)) +#define DIDSR_LDCS_LVDS1(n) (3 << (8 + (n) * 2)) +#define DIDSR_LDCS_MASK(n) (3 << (8 + (n) * 2)) #define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2)) #define DIDSR_PDCS_MASK(n) (3 << ((n) * 2)) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 23e41c83c875..b7fc5b069cbc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -187,17 +187,43 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, struct sg_table 
sg_tables[3]) { struct rcar_du_device *rcdu = vsp->dev; - unsigned int i; + unsigned int i, j; int ret; for (i = 0; i < fb->format->num_planes; ++i) { struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i); struct sg_table *sgt = &sg_tables[i]; - ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr, - gem->base.size); - if (ret) - goto fail; + if (gem->sgt) { + struct scatterlist *src; + struct scatterlist *dst; + + /* + * If the GEM buffer has a scatter gather table, it has + * been imported from a dma-buf and has no physical + * address as it might not be physically contiguous. + * Copy the original scatter gather table to map it to + * the VSP. + */ + ret = sg_alloc_table(sgt, gem->sgt->orig_nents, + GFP_KERNEL); + if (ret) + goto fail; + + src = gem->sgt->sgl; + dst = sgt->sgl; + for (j = 0; j < gem->sgt->orig_nents; ++j) { + sg_set_page(dst, sg_page(src), src->length, + src->offset); + src = sg_next(src); + dst = sg_next(dst); + } + } else { + ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, + gem->paddr, gem->base.size); + if (ret) + goto fail; + } ret = vsp1_du_map_sg(vsp->vsp, sgt); if (ret) { diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index d061b8de748f..72a272cfc11e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -576,6 +576,9 @@ static int rcar_lvds_attach(struct drm_bridge *bridge, { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); + if (!lvds->next_bridge) + return 0; + return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge, flags); } @@ -598,6 +601,14 @@ bool rcar_lvds_dual_link(struct drm_bridge *bridge) } EXPORT_SYMBOL_GPL(rcar_lvds_dual_link); +bool rcar_lvds_is_connected(struct drm_bridge *bridge) +{ + struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); + + return lvds->next_bridge != NULL; +} +EXPORT_SYMBOL_GPL(rcar_lvds_is_connected); + /* ----------------------------------------------------------------------------- * Probe & Remove */ @@ -802,7 +813,6 @@ static int rcar_lvds_probe(struct platform_device *pdev) { const struct soc_device_attribute *attr; struct rcar_lvds *lvds; - struct resource *mem; int ret; lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL); @@ -825,8 +835,7 @@ static int rcar_lvds_probe(struct platform_device *pdev) lvds->bridge.funcs = &rcar_lvds_bridge_ops; lvds->bridge.of_node = pdev->dev.of_node; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - lvds->mmio = devm_ioremap_resource(&pdev->dev, mem); + lvds->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(lvds->mmio)) return PTR_ERR(lvds->mmio); diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.h b/drivers/gpu/drm/rcar-du/rcar_lvds.h index 222ec0e60785..eb7c6ef03b00 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.h +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.h @@ -16,6 +16,7 @@ struct drm_bridge; int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq); void rcar_lvds_clk_disable(struct drm_bridge *bridge); bool rcar_lvds_dual_link(struct drm_bridge *bridge); +bool rcar_lvds_is_connected(struct drm_bridge *bridge); #else static inline int rcar_lvds_clk_enable(struct drm_bridge *bridge, unsigned long freq) @@ -27,6 +28,10 @@ static inline bool rcar_lvds_dual_link(struct drm_bridge *bridge) { return false; } +static inline bool rcar_lvds_is_connected(struct drm_bridge *bridge) +{ + return false; +} #endif /* CONFIG_DRM_RCAR_LVDS */ #endif /* __RCAR_LVDS_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c 
b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c new file mode 100644 index 000000000000..891bb956fd61 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c @@ -0,0 +1,819 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rcar_mipi_dsi.c -- R-Car MIPI DSI Encoder + * + * Copyright (C) 2020 Renesas Electronics Corporation + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#include "rcar_mipi_dsi_regs.h" + +struct rcar_mipi_dsi { + struct device *dev; + const struct rcar_mipi_dsi_device_info *info; + struct reset_control *rstc; + + struct mipi_dsi_host host; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + struct drm_connector connector; + + void __iomem *mmio; + struct { + struct clk *mod; + struct clk *pll; + struct clk *dsi; + } clocks; + + enum mipi_dsi_pixel_format format; + unsigned int num_data_lanes; + unsigned int lanes; +}; + +static inline struct rcar_mipi_dsi * +bridge_to_rcar_mipi_dsi(struct drm_bridge *bridge) +{ + return container_of(bridge, struct rcar_mipi_dsi, bridge); +} + +static inline struct rcar_mipi_dsi * +host_to_rcar_mipi_dsi(struct mipi_dsi_host *host) +{ + return container_of(host, struct rcar_mipi_dsi, host); +} + +static const u32 phtw[] = { + 0x01020114, 0x01600115, /* General testing */ + 0x01030116, 0x0102011d, /* General testing */ + 0x011101a4, 0x018601a4, /* 1Gbps testing */ + 0x014201a0, 0x010001a3, /* 1Gbps testing */ + 0x0101011f, /* 1Gbps testing */ +}; + +static const u32 phtw2[] = { + 0x010c0130, 0x010c0140, /* General testing */ + 0x010c0150, 0x010c0180, /* General testing */ + 0x010c0190, + 0x010a0160, 0x010a0170, + 0x01800164, 0x01800174, /* 1Gbps testing */ +}; + +static const u32 hsfreqrange_table[][2] = { + { 80000000U, 0x00 }, { 90000000U, 0x10 }, { 100000000U, 0x20 }, + { 110000000U, 0x30 }, { 120000000U, 0x01 }, { 130000000U, 0x11 }, + { 140000000U, 0x21 }, { 150000000U, 0x31 }, { 160000000U, 0x02 }, + { 170000000U, 0x12 }, { 180000000U, 0x22 }, { 190000000U, 0x32 }, + { 205000000U, 0x03 }, { 220000000U, 0x13 }, { 235000000U, 0x23 }, + { 250000000U, 0x33 }, { 275000000U, 0x04 }, { 300000000U, 0x14 }, + { 325000000U, 0x25 }, { 350000000U, 0x35 }, { 400000000U, 0x05 }, + { 450000000U, 0x16 }, { 500000000U, 0x26 }, { 550000000U, 0x37 }, + { 600000000U, 0x07 }, { 650000000U, 0x18 }, { 700000000U, 0x28 }, + { 750000000U, 0x39 }, { 800000000U, 0x09 }, { 850000000U, 0x19 }, + { 900000000U, 0x29 }, { 950000000U, 0x3a }, { 1000000000U, 0x0a }, + { 1050000000U, 0x1a }, { 1100000000U, 0x2a }, { 1150000000U, 0x3b }, + { 1200000000U, 0x0b }, { 1250000000U, 0x1b }, { 1300000000U, 0x2b }, + { 1350000000U, 0x3c }, { 1400000000U, 0x0c }, { 1450000000U, 0x1c }, + { 1500000000U, 0x2c }, { 1550000000U, 0x3d }, { 1600000000U, 0x0d }, + { 1650000000U, 0x1d }, { 1700000000U, 0x2e }, { 1750000000U, 0x3e }, + { 1800000000U, 0x0e }, { 1850000000U, 0x1e }, { 1900000000U, 0x2f }, + { 1950000000U, 0x3f }, { 2000000000U, 0x0f }, { 2050000000U, 0x40 }, + { 2100000000U, 0x41 }, { 2150000000U, 0x42 }, { 2200000000U, 0x43 }, + { 2250000000U, 0x44 }, { 2300000000U, 0x45 }, { 2350000000U, 0x46 }, + { 
2400000000U, 0x47 }, { 2450000000U, 0x48 }, { 2500000000U, 0x49 }, + { /* sentinel */ }, +}; + +struct vco_cntrl_value { + u32 min_freq; + u32 max_freq; + u16 value; +}; + +static const struct vco_cntrl_value vco_cntrl_table[] = { + { .min_freq = 40000000U, .max_freq = 55000000U, .value = 0x3f }, + { .min_freq = 52500000U, .max_freq = 80000000U, .value = 0x39 }, + { .min_freq = 80000000U, .max_freq = 110000000U, .value = 0x2f }, + { .min_freq = 105000000U, .max_freq = 160000000U, .value = 0x29 }, + { .min_freq = 160000000U, .max_freq = 220000000U, .value = 0x1f }, + { .min_freq = 210000000U, .max_freq = 320000000U, .value = 0x19 }, + { .min_freq = 320000000U, .max_freq = 440000000U, .value = 0x0f }, + { .min_freq = 420000000U, .max_freq = 660000000U, .value = 0x09 }, + { .min_freq = 630000000U, .max_freq = 1149000000U, .value = 0x03 }, + { .min_freq = 1100000000U, .max_freq = 1152000000U, .value = 0x01 }, + { .min_freq = 1150000000U, .max_freq = 1250000000U, .value = 0x01 }, + { /* sentinel */ }, +}; + +static void rcar_mipi_dsi_write(struct rcar_mipi_dsi *dsi, u32 reg, u32 data) +{ + iowrite32(data, dsi->mmio + reg); +} + +static u32 rcar_mipi_dsi_read(struct rcar_mipi_dsi *dsi, u32 reg) +{ + return ioread32(dsi->mmio + reg); +} + +static void rcar_mipi_dsi_clr(struct rcar_mipi_dsi *dsi, u32 reg, u32 clr) +{ + rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) & ~clr); +} + +static void rcar_mipi_dsi_set(struct rcar_mipi_dsi *dsi, u32 reg, u32 set) +{ + rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) | set); +} + +static int rcar_mipi_dsi_phtw_test(struct rcar_mipi_dsi *dsi, u32 phtw) +{ + u32 status; + int ret; + + rcar_mipi_dsi_write(dsi, PHTW, phtw); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & (PHTW_DWEN | PHTW_CWEN)), + 2000, 10000, false, dsi, PHTW); + if (ret < 0) { + dev_err(dsi->dev, "PHY test interface write timeout (0x%08x)\n", + phtw); + return ret; + } + + return ret; +} + +/* ----------------------------------------------------------------------------- + * Hardware Setup + */ + +struct dsi_setup_info { + unsigned long fout; + u16 vco_cntrl; + u16 prop_cntrl; + u16 hsfreqrange; + u16 div; + unsigned int m; + unsigned int n; +}; + +static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi, + struct clk *clk, unsigned long target, + struct dsi_setup_info *setup_info) +{ + + const struct vco_cntrl_value *vco_cntrl; + unsigned long fout_target; + unsigned long fin, fout; + unsigned long hsfreq; + unsigned int best_err = -1; + unsigned int divider; + unsigned int n; + unsigned int i; + unsigned int err; + + /* + * Calculate Fout = dot clock * ColorDepth / (2 * Lane Count) + * The valid range of Fout is [40 - 1250] MHz + */ + fout_target = target * mipi_dsi_pixel_format_to_bpp(dsi->format) + / (2 * dsi->lanes); + if (fout_target < 40000000 || fout_target > 1250000000) + return; + + /* Find vco_cntrl */ + for (vco_cntrl = vco_cntrl_table; vco_cntrl->min_freq != 0; vco_cntrl++) { + if (fout_target > vco_cntrl->min_freq && + fout_target <= vco_cntrl->max_freq) { + setup_info->vco_cntrl = vco_cntrl->value; + if (fout_target >= 1150000000) + setup_info->prop_cntrl = 0x0c; + else + setup_info->prop_cntrl = 0x0b; + break; + } + } + + /* Add divider */ + setup_info->div = (setup_info->vco_cntrl & 0x30) >> 4; + + /* Find hsfreqrange */ + hsfreq = fout_target * 2; + for (i = 0; i < ARRAY_SIZE(hsfreqrange_table); i++) { + if (hsfreqrange_table[i][0] >= hsfreq) { + setup_info->hsfreqrange = hsfreqrange_table[i][1]; + break; + } + } + + /* + * 
Calculate n and m for PLL clock + * Following the HW manual the ranges of n and m are + * n = [3-8] and m = [64-625] + */ + fin = clk_get_rate(clk); + divider = 1 << setup_info->div; + for (n = 3; n < 9; n++) { + unsigned long fpfd; + unsigned int m; + + fpfd = fin / n; + + for (m = 64; m < 626; m++) { + fout = fpfd * m / divider; + err = abs((long)(fout - fout_target) * 10000 / + (long)fout_target); + if (err < best_err) { + setup_info->m = m - 2; + setup_info->n = n - 1; + setup_info->fout = fout; + best_err = err; + if (err == 0) + goto done; + } + } + } + +done: + dev_dbg(dsi->dev, + "%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/DIV %u/%u/%u\n", + clk, fin, setup_info->fout, fout_target, best_err / 100, + best_err % 100, setup_info->m, setup_info->n, setup_info->div); + dev_dbg(dsi->dev, + "vco_cntrl = 0x%x\tprop_cntrl = 0x%x\thsfreqrange = 0x%x\n", + setup_info->vco_cntrl, setup_info->prop_cntrl, + setup_info->hsfreqrange); +} + +static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ + u32 setr; + u32 vprmset0r; + u32 vprmset1r; + u32 vprmset2r; + u32 vprmset3r; + u32 vprmset4r; + + /* Configuration for Pixel Stream and Packet Header */ + if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24); + else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18); + else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16); + else { + dev_warn(dsi->dev, "unsupported format"); + return; + } + + /* Configuration for Blanking sequence and Input Pixel */ + setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN + | TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES + | TXVMSETR_PIXWDTH | TXVMSETR_VSTPM; + rcar_mipi_dsi_write(dsi, TXVMSETR, setr); + + /* Configuration for Video Parameters */ + vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ? + TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW) + | (mode->flags & DRM_MODE_FLAG_PHSYNC ? 
+ TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW) + | TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24; + + vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay) + | TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start); + + vprmset2r = TXVMVPRMSET2R_VFP(mode->vsync_start - mode->vdisplay) + | TXVMVPRMSET2R_VBP(mode->vtotal - mode->vsync_end); + + vprmset3r = TXVMVPRMSET3R_HACTIVE(mode->hdisplay) + | TXVMVPRMSET3R_HSA(mode->hsync_end - mode->hsync_start); + + vprmset4r = TXVMVPRMSET4R_HFP(mode->hsync_start - mode->hdisplay) + | TXVMVPRMSET4R_HBP(mode->htotal - mode->hsync_end); + + rcar_mipi_dsi_write(dsi, TXVMVPRMSET0R, vprmset0r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET1R, vprmset1r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET2R, vprmset2r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET3R, vprmset3r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET4R, vprmset4r); +} + +static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ + struct dsi_setup_info setup_info = {}; + unsigned int timeout; + int ret, i; + int dsi_format; + u32 phy_setup; + u32 clockset2, clockset3; + u32 ppisetr; + u32 vclkset; + + /* Checking valid format */ + dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format); + if (dsi_format < 0) { + dev_warn(dsi->dev, "invalid format"); + return -EINVAL; + } + + /* Parameters Calculation */ + rcar_mipi_dsi_parameters_calc(dsi, dsi->clocks.pll, + mode->clock * 1000, &setup_info); + + /* LPCLK enable */ + rcar_mipi_dsi_set(dsi, LPCLKSET, LPCLKSET_CKEN); + + /* CFGCLK enabled */ + rcar_mipi_dsi_set(dsi, CFGCLKSET, CFGCLKSET_CKEN); + + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ); + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + + rcar_mipi_dsi_set(dsi, PHTC, PHTC_TESTCLR); + rcar_mipi_dsi_clr(dsi, PHTC, PHTC_TESTCLR); + + /* PHY setting */ + phy_setup = rcar_mipi_dsi_read(dsi, PHYSETUP); + phy_setup &= ~PHYSETUP_HSFREQRANGE_MASK; + phy_setup |= PHYSETUP_HSFREQRANGE(setup_info.hsfreqrange); + rcar_mipi_dsi_write(dsi, PHYSETUP, phy_setup); + + for (i = 0; i < ARRAY_SIZE(phtw); i++) { + ret = rcar_mipi_dsi_phtw_test(dsi, phtw[i]); + if (ret < 0) + return ret; + } + + /* PLL Clock Setting */ + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + + clockset2 = CLOCKSET2_M(setup_info.m) | CLOCKSET2_N(setup_info.n) + | CLOCKSET2_VCO_CNTRL(setup_info.vco_cntrl); + clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.prop_cntrl) + | CLOCKSET3_INT_CNTRL(0) + | CLOCKSET3_CPBIAS_CNTRL(0x10) + | CLOCKSET3_GMP_CNTRL(1); + rcar_mipi_dsi_write(dsi, CLOCKSET2, clockset2); + rcar_mipi_dsi_write(dsi, CLOCKSET3, clockset3); + + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + udelay(10); + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + + ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN; + rcar_mipi_dsi_write(dsi, PPISETR, ppisetr); + + rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_RSTZ); + usleep_range(400, 500); + + /* Checking PPI clock status register */ + for (timeout = 10; timeout > 0; --timeout) { + if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) && + (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) && + (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK)) + break; + + usleep_range(1000, 2000); + } + + if (!timeout) { + dev_err(dsi->dev, "failed to enable PPI clock\n"); + return -ETIMEDOUT; + } + + for (i = 0; i < 
ARRAY_SIZE(phtw2); i++) { + ret = rcar_mipi_dsi_phtw_test(dsi, phtw2[i]); + if (ret < 0) + return ret; + } + + /* Enable DOT clock */ + vclkset = VCLKSET_CKEN; + rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + + if (dsi_format == 24) + vclkset |= VCLKSET_BPP_24; + else if (dsi_format == 18) + vclkset |= VCLKSET_BPP_18; + else if (dsi_format == 16) + vclkset |= VCLKSET_BPP_16; + else { + dev_warn(dsi->dev, "unsupported format"); + return -EINVAL; + } + vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div) + | VCLKSET_LANE(dsi->lanes - 1); + + rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + + /* After setting VCLKSET register, enable VCLKEN */ + rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN); + + dev_dbg(dsi->dev, "DSI device is started\n"); + + return 0; +} + +static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi) +{ + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ); + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + + dev_dbg(dsi->dev, "DSI device is shut down\n"); +} + +static int rcar_mipi_dsi_clk_enable(struct rcar_mipi_dsi *dsi) +{ + int ret; + + reset_control_deassert(dsi->rstc); + + ret = clk_prepare_enable(dsi->clocks.mod); + if (ret < 0) + goto err_reset; + + ret = clk_prepare_enable(dsi->clocks.dsi); + if (ret < 0) + goto err_clock; + + return 0; + +err_clock: + clk_disable_unprepare(dsi->clocks.mod); +err_reset: + reset_control_assert(dsi->rstc); + return ret; +} + +static void rcar_mipi_dsi_clk_disable(struct rcar_mipi_dsi *dsi) +{ + clk_disable_unprepare(dsi->clocks.dsi); + clk_disable_unprepare(dsi->clocks.mod); + + reset_control_assert(dsi->rstc); +} + +static int rcar_mipi_dsi_start_hs_clock(struct rcar_mipi_dsi *dsi) +{ + /* + * The HW manual requires checking that TxDDRClkHS-Q is stable, but it + * does not describe how, so the check is skipped for now. + */ + u32 status; + int ret; + + /* Start HS clock. */ + rcar_mipi_dsi_set(dsi, PPICLCR, PPICLCR_TXREQHS); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & PPICLSR_TOHS, + 2000, 10000, false, dsi, PPICLSR); + if (ret < 0) { + dev_err(dsi->dev, "failed to enable HS clock\n"); + return ret; + } + + rcar_mipi_dsi_set(dsi, PPICLSCR, PPICLSCR_TOHS); + + return 0; +} + +static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi) +{ + u32 status; + int ret; + + /* Wait for the link to be ready. */ + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & (LINKSR_LPBUSY | LINKSR_HSBUSY)), + 2000, 10000, false, dsi, LINKSR); + if (ret < 0) { + dev_err(dsi->dev, "Link failed to become ready\n"); + return ret; + } + + /* De-assert video FIFO clear. */ + rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_VFCLR); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & TXVMSR_VFRDY, + 2000, 10000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to de-assert video FIFO clear\n"); + return ret; + } + + /* Enable transmission in video mode. 
*/ + rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_EN_VIDEO); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & TXVMSR_RDY, + 2000, 10000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to enable video transmission\n"); + return ret; + } + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Bridge + */ + +static int rcar_mipi_dsi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + flags); +} + +static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + const struct drm_display_mode *mode; + struct drm_connector *connector; + struct drm_crtc *crtc; + int ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode; + + ret = rcar_mipi_dsi_clk_enable(dsi); + if (ret < 0) { + dev_err(dsi->dev, "failed to enable DSI clocks\n"); + return; + } + + ret = rcar_mipi_dsi_startup(dsi, mode); + if (ret < 0) + goto err_dsi_startup; + + rcar_mipi_dsi_set_display_timing(dsi, mode); + + ret = rcar_mipi_dsi_start_hs_clock(dsi); + if (ret < 0) + goto err_dsi_start_hs; + + rcar_mipi_dsi_start_video(dsi); + + return; + +err_dsi_start_hs: + rcar_mipi_dsi_shutdown(dsi); +err_dsi_startup: + rcar_mipi_dsi_clk_disable(dsi); +} + +static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + rcar_mipi_dsi_shutdown(dsi); + rcar_mipi_dsi_clk_disable(dsi); +} + +static enum drm_mode_status +rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock > 297000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static const struct drm_bridge_funcs rcar_mipi_dsi_bridge_ops = { + .attach = rcar_mipi_dsi_attach, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_enable = rcar_mipi_dsi_atomic_enable, + .atomic_disable = rcar_mipi_dsi_atomic_disable, + .mode_valid = rcar_mipi_dsi_bridge_mode_valid, +}; + +/* ----------------------------------------------------------------------------- + * Host setting + */ + +static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + int ret; + + if (device->lanes > dsi->num_data_lanes) + return -EINVAL; + + dsi->lanes = device->lanes; + dsi->format = device->format; + + dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, + 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + dev_err(dsi->dev, "failed to get next bridge: %d\n", ret); + return ret; + } + + /* Initialize the DRM bridge. 
*/ + dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops; + dsi->bridge.of_node = dsi->dev->of_node; + drm_bridge_add(&dsi->bridge); + + return 0; +} + +static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + + drm_bridge_remove(&dsi->bridge); + + return 0; +} + +static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = { + .attach = rcar_mipi_dsi_host_attach, + .detach = rcar_mipi_dsi_host_detach, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove + */ + +static int rcar_mipi_dsi_parse_dt(struct rcar_mipi_dsi *dsi) +{ + struct device_node *ep; + u32 data_lanes[4]; + int ret; + + ep = of_graph_get_endpoint_by_regs(dsi->dev->of_node, 1, 0); + if (!ep) { + dev_dbg(dsi->dev, "unconnected port@1\n"); + return -ENODEV; + } + + ret = of_property_read_variable_u32_array(ep, "data-lanes", data_lanes, + 1, 4); + of_node_put(ep); + + if (ret < 0) { + dev_err(dsi->dev, "missing or invalid data-lanes property\n"); + return -ENODEV; + } + + dsi->num_data_lanes = ret; + return 0; +} + +static struct clk *rcar_mipi_dsi_get_clock(struct rcar_mipi_dsi *dsi, + const char *name, + bool optional) +{ + struct clk *clk; + + clk = devm_clk_get(dsi->dev, name); + if (!IS_ERR(clk)) + return clk; + + if (PTR_ERR(clk) == -ENOENT && optional) + return NULL; + + dev_err_probe(dsi->dev, PTR_ERR(clk), "failed to get %s clock\n", + name ? name : "module"); + + return clk; +} + +static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi) +{ + dsi->clocks.mod = rcar_mipi_dsi_get_clock(dsi, NULL, false); + if (IS_ERR(dsi->clocks.mod)) + return PTR_ERR(dsi->clocks.mod); + + dsi->clocks.pll = rcar_mipi_dsi_get_clock(dsi, "pll", true); + if (IS_ERR(dsi->clocks.pll)) + return PTR_ERR(dsi->clocks.pll); + + dsi->clocks.dsi = rcar_mipi_dsi_get_clock(dsi, "dsi", true); + if (IS_ERR(dsi->clocks.dsi)) + return PTR_ERR(dsi->clocks.dsi); + + if (!dsi->clocks.pll && !dsi->clocks.dsi) { + dev_err(dsi->dev, "no input clock (pll, dsi)\n"); + return -EINVAL; + } + + return 0; +} + +static int rcar_mipi_dsi_probe(struct platform_device *pdev) +{ + struct rcar_mipi_dsi *dsi; + struct resource *mem; + int ret; + + dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); + if (dsi == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, dsi); + + dsi->dev = &pdev->dev; + dsi->info = of_device_get_match_data(&pdev->dev); + + ret = rcar_mipi_dsi_parse_dt(dsi); + if (ret < 0) + return ret; + + /* Acquire resources. */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dsi->mmio = devm_ioremap_resource(dsi->dev, mem); + if (IS_ERR(dsi->mmio)) + return PTR_ERR(dsi->mmio); + + ret = rcar_mipi_dsi_get_clocks(dsi); + if (ret < 0) + return ret; + + dsi->rstc = devm_reset_control_get(dsi->dev, NULL); + if (IS_ERR(dsi->rstc)) { + dev_err(dsi->dev, "failed to get cpg reset\n"); + return PTR_ERR(dsi->rstc); + } + + /* Initialize the DSI host. 
*/ + dsi->host.dev = dsi->dev; + dsi->host.ops = &rcar_mipi_dsi_host_ops; + ret = mipi_dsi_host_register(&dsi->host); + if (ret < 0) + return ret; + + return 0; +} + +static int rcar_mipi_dsi_remove(struct platform_device *pdev) +{ + struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev); + + mipi_dsi_host_unregister(&dsi->host); + + return 0; +} + +static const struct of_device_id rcar_mipi_dsi_of_table[] = { + { .compatible = "renesas,r8a779a0-dsi-csi2-tx" }, + { } +}; + +MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table); + +static struct platform_driver rcar_mipi_dsi_platform_driver = { + .probe = rcar_mipi_dsi_probe, + .remove = rcar_mipi_dsi_remove, + .driver = { + .name = "rcar-mipi-dsi", + .of_match_table = rcar_mipi_dsi_of_table, + }, +}; + +module_platform_driver(rcar_mipi_dsi_platform_driver); + +MODULE_DESCRIPTION("Renesas R-Car MIPI DSI Encoder Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h new file mode 100644 index 000000000000..0e7a9274749f --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * rcar_mipi_dsi_regs.h -- R-Car MIPI DSI Interface Registers Definitions + * + * Copyright (C) 2020 Renesas Electronics Corporation + */ + +#ifndef __RCAR_MIPI_DSI_REGS_H__ +#define __RCAR_MIPI_DSI_REGS_H__ + +#define LINKSR 0x010 +#define LINKSR_LPBUSY (1 << 1) +#define LINKSR_HSBUSY (1 << 0) + +/* + * Video Mode Register + */ +#define TXVMSETR 0x180 +#define TXVMSETR_SYNSEQ_PULSES (0 << 16) +#define TXVMSETR_SYNSEQ_EVENTS (1 << 16) +#define TXVMSETR_VSTPM (1 << 15) +#define TXVMSETR_PIXWDTH (1 << 8) +#define TXVMSETR_VSEN_EN (1 << 4) +#define TXVMSETR_VSEN_DIS (0 << 4) +#define TXVMSETR_HFPBPEN_EN (1 << 2) +#define TXVMSETR_HFPBPEN_DIS (0 << 2) +#define TXVMSETR_HBPBPEN_EN (1 << 1) +#define TXVMSETR_HBPBPEN_DIS (0 << 1) +#define TXVMSETR_HSABPEN_EN (1 << 0) +#define TXVMSETR_HSABPEN_DIS (0 << 0) + +#define TXVMCR 0x190 +#define TXVMCR_VFCLR (1 << 12) +#define TXVMCR_EN_VIDEO (1 << 0) + +#define TXVMSR 0x1a0 +#define TXVMSR_STR (1 << 16) +#define TXVMSR_VFRDY (1 << 12) +#define TXVMSR_ACT (1 << 8) +#define TXVMSR_RDY (1 << 0) + +#define TXVMSCR 0x1a4 +#define TXVMSCR_STR (1 << 16) + +#define TXVMPSPHSETR 0x1c0 +#define TXVMPSPHSETR_DT_RGB16 (0x0e << 16) +#define TXVMPSPHSETR_DT_RGB18 (0x1e << 16) +#define TXVMPSPHSETR_DT_RGB18_LS (0x2e << 16) +#define TXVMPSPHSETR_DT_RGB24 (0x3e << 16) +#define TXVMPSPHSETR_DT_YCBCR16 (0x2c << 16) + +#define TXVMVPRMSET0R 0x1d0 +#define TXVMVPRMSET0R_HSPOL_HIG (0 << 17) +#define TXVMVPRMSET0R_HSPOL_LOW (1 << 17) +#define TXVMVPRMSET0R_VSPOL_HIG (0 << 16) +#define TXVMVPRMSET0R_VSPOL_LOW (1 << 16) +#define TXVMVPRMSET0R_CSPC_RGB (0 << 4) +#define TXVMVPRMSET0R_CSPC_YCbCr (1 << 4) +#define TXVMVPRMSET0R_BPP_16 (0 << 0) +#define TXVMVPRMSET0R_BPP_18 (1 << 0) +#define TXVMVPRMSET0R_BPP_24 (2 << 0) + +#define TXVMVPRMSET1R 0x1d4 +#define TXVMVPRMSET1R_VACTIVE(x) (((x) & 0x7fff) << 16) +#define TXVMVPRMSET1R_VSA(x) (((x) & 0xfff) << 0) + +#define TXVMVPRMSET2R 0x1d8 +#define TXVMVPRMSET2R_VFP(x) (((x) & 0x1fff) << 16) +#define TXVMVPRMSET2R_VBP(x) (((x) & 0x1fff) << 0) + +#define TXVMVPRMSET3R 0x1dc +#define TXVMVPRMSET3R_HACTIVE(x) (((x) & 0x7fff) << 16) +#define TXVMVPRMSET3R_HSA(x) (((x) & 0xfff) << 0) + +#define TXVMVPRMSET4R 0x1e0 +#define TXVMVPRMSET4R_HFP(x) (((x) & 0x1fff) << 16) +#define TXVMVPRMSET4R_HBP(x) (((x) & 0x1fff) << 0) + +/* + * PHY-Protocol Interface (PPI) Registers + */ 
+#define PPISETR 0x700 +#define PPISETR_DLEN_0 (0x1 << 0) +#define PPISETR_DLEN_1 (0x3 << 0) +#define PPISETR_DLEN_2 (0x7 << 0) +#define PPISETR_DLEN_3 (0xf << 0) +#define PPISETR_CLEN (1 << 8) + +#define PPICLCR 0x710 +#define PPICLCR_TXREQHS (1 << 8) +#define PPICLCR_TXULPSEXT (1 << 1) +#define PPICLCR_TXULPSCLK (1 << 0) + +#define PPICLSR 0x720 +#define PPICLSR_HSTOLP (1 << 27) +#define PPICLSR_TOHS (1 << 26) +#define PPICLSR_STPST (1 << 0) + +#define PPICLSCR 0x724 +#define PPICLSCR_HSTOLP (1 << 27) +#define PPICLSCR_TOHS (1 << 26) + +#define PPIDLSR 0x760 +#define PPIDLSR_STPST (0xf << 0) + +/* + * Clocks registers + */ +#define LPCLKSET 0x1000 +#define LPCLKSET_CKEN (1 << 8) +#define LPCLKSET_LPCLKDIV(x) (((x) & 0x3f) << 0) + +#define CFGCLKSET 0x1004 +#define CFGCLKSET_CKEN (1 << 8) +#define CFGCLKSET_CFGCLKDIV(x) (((x) & 0x3f) << 0) + +#define DOTCLKDIV 0x1008 +#define DOTCLKDIV_CKEN (1 << 8) +#define DOTCLKDIV_DOTCLKDIV(x) (((x) & 0x3f) << 0) + +#define VCLKSET 0x100c +#define VCLKSET_CKEN (1 << 16) +#define VCLKSET_COLOR_RGB (0 << 8) +#define VCLKSET_COLOR_YCC (1 << 8) +#define VCLKSET_DIV(x) (((x) & 0x3) << 4) +#define VCLKSET_BPP_16 (0 << 2) +#define VCLKSET_BPP_18 (1 << 2) +#define VCLKSET_BPP_18L (2 << 2) +#define VCLKSET_BPP_24 (3 << 2) +#define VCLKSET_LANE(x) (((x) & 0x3) << 0) + +#define VCLKEN 0x1010 +#define VCLKEN_CKEN (1 << 0) + +#define PHYSETUP 0x1014 +#define PHYSETUP_HSFREQRANGE(x) (((x) & 0x7f) << 16) +#define PHYSETUP_HSFREQRANGE_MASK (0x7f << 16) +#define PHYSETUP_CFGCLKFREQRANGE(x) (((x) & 0x3f) << 8) +#define PHYSETUP_SHUTDOWNZ (1 << 1) +#define PHYSETUP_RSTZ (1 << 0) + +#define CLOCKSET1 0x101c +#define CLOCKSET1_LOCK_PHY (1 << 17) +#define CLOCKSET1_LOCK (1 << 16) +#define CLOCKSET1_CLKSEL (1 << 8) +#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2) +#define CLOCKSET1_CLKINSEL_DIG (1 << 2) +#define CLOCKSET1_CLKINSEL_DU (1 << 3) +#define CLOCKSET1_SHADOW_CLEAR (1 << 1) +#define CLOCKSET1_UPDATEPLL (1 << 0) + +#define CLOCKSET2 0x1020 +#define CLOCKSET2_M(x) (((x) & 0xfff) << 16) +#define CLOCKSET2_VCO_CNTRL(x) (((x) & 0x3f) << 8) +#define CLOCKSET2_N(x) (((x) & 0xf) << 0) + +#define CLOCKSET3 0x1024 +#define CLOCKSET3_PROP_CNTRL(x) (((x) & 0x3f) << 24) +#define CLOCKSET3_INT_CNTRL(x) (((x) & 0x3f) << 16) +#define CLOCKSET3_CPBIAS_CNTRL(x) (((x) & 0x7f) << 8) +#define CLOCKSET3_GMP_CNTRL(x) (((x) & 0x3) << 0) + +#define PHTW 0x1034 +#define PHTW_DWEN (1 << 24) +#define PHTW_TESTDIN_DATA(x) (((x) & 0xff) << 16) +#define PHTW_CWEN (1 << 8) +#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0) + +#define PHTC 0x103c +#define PHTC_TESTCLR (1 << 0) + +#endif /* __RCAR_MIPI_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 17a9e7eb2130..1a56f696558c 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -5,7 +5,6 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o -rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 5dce875dbd11..16497c31d9f9 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1121,7 +1121,7 @@ static int cdn_dp_suspend(struct device *dev) return ret; } -static int cdn_dp_resume(struct device *dev) +static 
__maybe_unused int cdn_dp_resume(struct device *dev) { struct cdn_dp_device *dp = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index a9acbcc420d0..4ed7a6868197 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -267,6 +267,8 @@ struct dw_mipi_dsi_rockchip { struct dw_mipi_dsi *dmd; const struct rockchip_dw_dsi_chip_data *cdata; struct dw_mipi_dsi_plat_data pdata; + + bool dsi_bound; }; struct dphy_pll_parameter_map { @@ -772,10 +774,6 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) if (mux < 0) return; - pm_runtime_get_sync(dsi->dev); - if (dsi->slave) - pm_runtime_get_sync(dsi->slave->dev); - /* * For the RK3399, the clk of grf must be enabled before writing grf * register. And for RK3288 or other soc, this grf_clk must be NULL, @@ -794,20 +792,10 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) clk_disable_unprepare(dsi->grf_clk); } -static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder) -{ - struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder); - - if (dsi->slave) - pm_runtime_put(dsi->slave->dev); - pm_runtime_put(dsi->dev); -} - static const struct drm_encoder_helper_funcs dw_mipi_dsi_encoder_helper_funcs = { .atomic_check = dw_mipi_dsi_encoder_atomic_check, .enable = dw_mipi_dsi_encoder_enable, - .disable = dw_mipi_dsi_encoder_disable, }; static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi, @@ -937,10 +925,14 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, put_device(second); } + pm_runtime_get_sync(dsi->dev); + if (dsi->slave) + pm_runtime_get_sync(dsi->slave->dev); + ret = clk_prepare_enable(dsi->pllref_clk); if (ret) { DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret); - return ret; + goto out_pm_runtime; } /* @@ -952,7 +944,7 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = clk_prepare_enable(dsi->grf_clk); if (ret) { DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); - return ret; + goto out_pll_clk; } dw_mipi_dsi_rockchip_config(dsi); @@ -964,16 +956,27 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev); if (ret) { DRM_DEV_ERROR(dev, "Failed to create drm encoder\n"); - return ret; + goto out_pll_clk; } ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder); if (ret) { DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret); - return ret; + goto out_pll_clk; } + dsi->dsi_bound = true; + return 0; + +out_pll_clk: + clk_disable_unprepare(dsi->pllref_clk); +out_pm_runtime: + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); + + return ret; } static void dw_mipi_dsi_rockchip_unbind(struct device *dev, @@ -985,9 +988,15 @@ static void dw_mipi_dsi_rockchip_unbind(struct device *dev, if (dsi->is_slave) return; + dsi->dsi_bound = false; + dw_mipi_dsi_unbind(dsi->dmd); clk_disable_unprepare(dsi->pllref_clk); + + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); } static const struct component_ops dw_mipi_dsi_rockchip_ops = { @@ -1275,6 +1284,36 @@ static const struct phy_ops dw_mipi_dsi_dphy_ops = { .exit = dw_mipi_dsi_dphy_exit, }; +static int __maybe_unused dw_mipi_dsi_rockchip_resume(struct device *dev) +{ + struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev); + int ret; + + /* + * Re-configure DSI state, if we were previously initialized. 
We need + * to do this before rockchip_drm_drv tries to re-enable() any panels. + */ + if (dsi->dsi_bound) { + ret = clk_prepare_enable(dsi->grf_clk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); + return ret; + } + + dw_mipi_dsi_rockchip_config(dsi); + if (dsi->slave) + dw_mipi_dsi_rockchip_config(dsi->slave); + + clk_disable_unprepare(dsi->grf_clk); + } + + return 0; +} + +static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, dw_mipi_dsi_rockchip_resume) +}; + static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1396,14 +1435,10 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "Failed to probe dw_mipi_dsi: %d\n", ret); - goto err_clkdisable; + return ret; } return 0; - -err_clkdisable: - clk_disable_unprepare(dsi->pllref_clk); - return ret; } static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev) @@ -1592,6 +1627,7 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .remove = dw_mipi_dsi_rockchip_remove, .driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, + .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", }, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index e4ebe60b3cc1..bec207de4544 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -26,7 +26,6 @@ #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" -#include "rockchip_drm_fbdev.h" #include "rockchip_drm_gem.h" #define DRIVER_NAME "rockchip" @@ -159,10 +158,6 @@ static int rockchip_drm_bind(struct device *dev) drm_mode_config_reset(drm_dev); - ret = rockchip_drm_fbdev_init(drm_dev); - if (ret) - goto err_unbind_all; - /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm_dev); @@ -170,10 +165,11 @@ static int rockchip_drm_bind(struct device *dev) if (ret) goto err_kms_helper_poll_fini; + drm_fbdev_generic_setup(drm_dev, 0); + return 0; err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); - rockchip_drm_fbdev_fini(drm_dev); err_unbind_all: component_unbind_all(dev, drm_dev); err_iommu_cleanup: @@ -189,7 +185,6 @@ static void rockchip_drm_unbind(struct device *dev) drm_dev_unregister(drm_dev); - rockchip_drm_fbdev_fini(drm_dev); drm_kms_helper_poll_fini(drm_dev); drm_atomic_helper_shutdown(drm_dev); @@ -199,25 +194,15 @@ static void rockchip_drm_unbind(struct device *dev) drm_dev_put(drm_dev); } -static const struct file_operations rockchip_drm_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = rockchip_gem_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .release = drm_release, -}; +DEFINE_DRM_GEM_FOPS(rockchip_drm_driver_fops); static const struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .lastclose = drm_fb_helper_lastclose, .dumb_create = rockchip_gem_dumb_create, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table, - .gem_prime_mmap = rockchip_gem_mmap_buf, + .gem_prime_mmap = drm_gem_prime_mmap, .fops = &rockchip_drm_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 
aa0909e8edf9..143a48330f84 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -43,8 +43,6 @@ struct rockchip_crtc_state { * @mm_lock: protect drm_mm on multi-threads. */ struct rockchip_drm_private { - struct drm_fb_helper fbdev_helper; - struct drm_gem_object *fbdev_bo; struct iommu_domain *domain; struct mutex mm_lock; struct drm_mm mm; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c deleted file mode 100644 index 2fdc455c4ad7..000000000000 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ /dev/null @@ -1,163 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd - * Author:Mark Yao <mark.yao@rock-chips.com> - */ - -#include <drm/drm.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> -#include <drm/drm_probe_helper.h> - -#include "rockchip_drm_drv.h" -#include "rockchip_drm_gem.h" -#include "rockchip_drm_fb.h" -#include "rockchip_drm_fbdev.h" - -#define PREFERRED_BPP 32 -#define to_drm_private(x) \ - container_of(x, struct rockchip_drm_private, fbdev_helper) - -static int rockchip_fbdev_mmap(struct fb_info *info, - struct vm_area_struct *vma) -{ - struct drm_fb_helper *helper = info->par; - struct rockchip_drm_private *private = to_drm_private(helper); - - return rockchip_gem_mmap_buf(private->fbdev_bo, vma); -} - -static const struct fb_ops rockchip_drm_fbdev_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_mmap = rockchip_fbdev_mmap, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, -}; - -static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct rockchip_drm_private *private = to_drm_private(helper); - struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - struct drm_device *dev = helper->dev; - struct rockchip_gem_object *rk_obj; - struct drm_framebuffer *fb; - unsigned int bytes_per_pixel; - unsigned long offset; - struct fb_info *fbi; - size_t size; - int ret; - - bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel; - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - size = mode_cmd.pitches[0] * mode_cmd.height; - - rk_obj = rockchip_gem_create_object(dev, size, true); - if (IS_ERR(rk_obj)) - return -ENOMEM; - - private->fbdev_bo = &rk_obj->base; - - fbi = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(fbi)) { - DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n"); - ret = PTR_ERR(fbi); - goto out; - } - - helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd, - private->fbdev_bo); - if (IS_ERR(helper->fb)) { - DRM_DEV_ERROR(dev->dev, - "Failed to allocate DRM framebuffer.\n"); - ret = PTR_ERR(helper->fb); - goto out; - } - - fbi->fbops = &rockchip_drm_fbdev_ops; - - fb = helper->fb; - drm_fb_helper_fill_info(fbi, helper, sizes); - - offset = fbi->var.xoffset * bytes_per_pixel; - offset += fbi->var.yoffset * fb->pitches[0]; - - dev->mode_config.fb_base = 0; - fbi->screen_base = rk_obj->kvaddr + offset; - fbi->screen_size = rk_obj->base.size; - fbi->fix.smem_len = rk_obj->base.size; - - DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", - fb->width, fb->height, fb->format->depth, - rk_obj->kvaddr, - offset, size); - - 
return 0; - -out: - rockchip_gem_free_object(&rk_obj->base); - return ret; -} - -static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = { - .fb_probe = rockchip_drm_fbdev_create, -}; - -int rockchip_drm_fbdev_init(struct drm_device *dev) -{ - struct rockchip_drm_private *private = dev->dev_private; - struct drm_fb_helper *helper; - int ret; - - if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) - return -EINVAL; - - helper = &private->fbdev_helper; - - drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs); - - ret = drm_fb_helper_init(dev, helper); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, - "Failed to initialize drm fb helper - %d.\n", - ret); - return ret; - } - - ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); - if (ret < 0) { - DRM_DEV_ERROR(dev->dev, - "Failed to set initial hw config - %d.\n", - ret); - goto err_drm_fb_helper_fini; - } - - return 0; - -err_drm_fb_helper_fini: - drm_fb_helper_fini(helper); - return ret; -} - -void rockchip_drm_fbdev_fini(struct drm_device *dev) -{ - struct rockchip_drm_private *private = dev->dev_private; - struct drm_fb_helper *helper; - - helper = &private->fbdev_helper; - - drm_fb_helper_unregister_fbi(helper); - - if (helper->fb) - drm_framebuffer_put(helper->fb); - - drm_fb_helper_fini(helper); -} diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h deleted file mode 100644 index 5fb7ac2371a8..000000000000 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd - * Author:Mark Yao <mark.yao@rock-chips.com> - */ - -#ifndef _ROCKCHIP_DRM_FBDEV_H -#define _ROCKCHIP_DRM_FBDEV_H - -#ifdef CONFIG_DRM_FBDEV_EMULATION -int rockchip_drm_fbdev_init(struct drm_device *dev); -void rockchip_drm_fbdev_fini(struct drm_device *dev); -#else -static inline int rockchip_drm_fbdev_init(struct drm_device *dev) -{ - return 0; -} - -static inline void rockchip_drm_fbdev_fini(struct drm_device *dev) -{ -} -#endif - -#endif /* _ROCKCHIP_DRM_FBDEV_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 7971f57436dd..63eb73b624aa 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -241,11 +241,21 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); /* + * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the + * whole buffer from the start. + */ + vma->vm_pgoff = 0; + + /* * We allocated a struct page table for rk_obj, so clear * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 
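 * The lines added below also mark the mapping VM_IO | VM_DONTEXPAND |
 * VM_DONTDUMP and switch it to write-combined, non-encrypted page
 * protection before handing off to the iommu or DMA mapping path.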
*/ + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_flags &= ~VM_PFNMAP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + if (rk_obj->pages) ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); else @@ -257,39 +267,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, return ret; } -int rockchip_gem_mmap_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret) - return ret; - - return rockchip_drm_gem_object_mmap(obj, vma); -} - -/* drm driver mmap file operations */ -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_gem_object *obj; - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - /* - * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the - * whole buffer from the start. - */ - vma->vm_pgoff = 0; - - obj = vma->vm_private_data; - - return rockchip_drm_gem_object_mmap(obj, vma); -} - static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) { drm_gem_object_release(&rk_obj->base); @@ -301,6 +278,7 @@ static const struct drm_gem_object_funcs rockchip_gem_object_funcs = { .get_sg_table = rockchip_gem_prime_get_sg_table, .vmap = rockchip_gem_prime_vmap, .vunmap = rockchip_gem_prime_vunmap, + .mmap = rockchip_drm_gem_object_mmap, .vm_ops = &drm_gem_cma_vm_ops, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 5a70a56cd406..47c1861eece0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -34,13 +34,6 @@ rockchip_gem_prime_import_sg_table(struct drm_device *dev, int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); -/* drm driver mmap file operations */ -int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma); - -/* mmap a gem object to userspace. */ -int rockchip_gem_mmap_buf(struct drm_gem_object *obj, - struct vm_area_struct *vma); - struct rockchip_gem_object * rockchip_gem_create_object(struct drm_device *drm, unsigned int size, bool alloc_kmap); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ba9e14da41b4..3e8d9e2d1b67 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -726,7 +726,9 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc, spin_unlock(&vop->reg_lock); - wait_for_completion(&vop->dsp_hold_completion); + if (!wait_for_completion_timeout(&vop->dsp_hold_completion, + msecs_to_jiffies(200))) + WARN(1, "%s: timed out waiting for DSP hold", crtc->name); vop_dsp_hold_valid_irq_disable(vop); @@ -1174,26 +1176,24 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, * * Action plan: * - * 1. When DRM gives us a mode, we should add 999 Hz to it. That way - * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to - * make 60000 kHz then the clock framework will actually give us - * the right clock. + * 1. Try to set the exact rate first, and confirm the clock framework + * can provide it. * - * NOTE: if the PLL (maybe through a divider) could actually make - * a clock rate 999 Hz higher instead of the one we want then this - * could be a problem. 
Unfortunately there's not much we can do - * since it's baked into DRM to use kHz. It shouldn't matter in - * practice since Rockchip PLLs are controlled by tables and - * even if there is a divider in the middle I wouldn't expect PLL - * rates in the table that are just a few kHz different. + * 2. If the clock framework cannot provide the exact rate, we should + * add 999 Hz to the requested rate. That way if the clock we need + * is 60000001 Hz (~60 MHz) and DRM tells us to make 60000 kHz then + * the clock framework will actually give us the right clock. * - * 2. Get the clock framework to round the rate for us to tell us + * 3. Get the clock framework to round the rate for us to tell us * what it will actually make. * - * 3. Store the rounded up rate so that we don't need to worry about + * 4. Store the rounded up rate so that we don't need to worry about * this in the actual clk_set_rate(). */ - rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999); + rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000); + if (rate / 1000 != adjusted_mode->clock) + rate = clk_round_rate(vop->dclk, + adjusted_mode->clock * 1000 + 999); adjusted_mode->clock = DIV_ROUND_UP(rate, 1000); return true; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 27e1573af96e..191c56064f19 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -190,6 +190,16 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) } EXPORT_SYMBOL(drm_sched_entity_flush); +static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk) +{ + struct drm_sched_job *job = container_of(wrk, typeof(*job), work); + + drm_sched_fence_finished(job->s_fence); + WARN_ON(job->s_fence->parent); + job->sched->ops->free_job(job); +} + + /* Signal the scheduler finished fence when the entity in question is killed. 
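 * Deferring the signalling and free_job() to irq_work (added above)
 * avoids signalling one scheduler fence from directly inside another
 * fence's signalling callback, where the fence locks share a lockdep
 * class.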
*/ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct dma_fence_cb *cb) @@ -197,9 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct drm_sched_job *job = container_of(cb, struct drm_sched_job, finish_cb); - drm_sched_fence_finished(job->s_fence); - WARN_ON(job->s_fence->parent); - job->sched->ops->free_job(job); + init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work); + irq_work_queue(&job->work); } static struct dma_fence * diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 5bc5f775abe1..f91fb31ab7a7 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -704,9 +704,13 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, int ret; dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { + /* Make sure to grab an additional ref on the added fence */ + dma_fence_get(fence); ret = drm_sched_job_add_dependency(job, fence); - if (ret) + if (ret) { + dma_fence_put(fence); return ret; + } } return 0; } diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c index 1c19a5d3eefb..8d8d8e214c28 100644 --- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c +++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c @@ -30,6 +30,7 @@ static void mock_setup(struct drm_plane_state *state) mock_device.driver = &mock_driver; mock_device.mode_config.prop_fb_damage_clips = &mock_prop; mock_plane.dev = &mock_device; + mock_obj_props.count = 0; mock_plane.base.properties = &mock_obj_props; mock_prop.base.id = 1; /* 0 is an invalid id */ mock_prop.dev = &mock_device; diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index e2a6c82c8252..288b838a904a 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig @@ -5,7 +5,6 @@ config DRM_SHMOBILE depends on ARCH_SHMOBILE || COMPILE_TEST select BACKLIGHT_CLASS_DEVICE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER help Choose this option if you have an SH Mobile chipset. diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 7db01904d18d..80078a9fd7f6 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -192,7 +192,6 @@ static int shmob_drm_probe(struct platform_device *pdev) struct shmob_drm_platform_data *pdata = pdev->dev.platform_data; struct shmob_drm_device *sdev; struct drm_device *ddev; - struct resource *res; unsigned int i; int ret; @@ -215,8 +214,7 @@ static int shmob_drm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sdev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - sdev->mmio = devm_ioremap_resource(&pdev->dev, res); + sdev->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sdev->mmio)) return PTR_ERR(sdev->mmio); diff --git a/drivers/gpu/drm/sprd/Kconfig b/drivers/gpu/drm/sprd/Kconfig new file mode 100644 index 000000000000..3edeaeca0e65 --- /dev/null +++ b/drivers/gpu/drm/sprd/Kconfig @@ -0,0 +1,13 @@ +config DRM_SPRD + tristate "DRM Support for Unisoc SoCs Platform" + depends on ARCH_SPRD || COMPILE_TEST + depends on DRM && OF + select DRM_GEM_CMA_HELPER + select DRM_KMS_CMA_HELPER + select DRM_KMS_HELPER + select DRM_MIPI_DSI + select VIDEOMODE_HELPERS + help + Choose this option if you have a Unisoc chipset. + If M is selected the module will be called sprd_drm. 
+ diff --git a/drivers/gpu/drm/sprd/Makefile b/drivers/gpu/drm/sprd/Makefile new file mode 100644 index 000000000000..e82e6a6f89de --- /dev/null +++ b/drivers/gpu/drm/sprd/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 + +sprd-drm-y := sprd_drm.o \ + sprd_dpu.o \ + sprd_dsi.o \ + megacores_pll.o + +obj-$(CONFIG_DRM_SPRD) += sprd-drm.o
\ No newline at end of file diff --git a/drivers/gpu/drm/sprd/megacores_pll.c b/drivers/gpu/drm/sprd/megacores_pll.c new file mode 100644 index 000000000000..3091dfdc11e3 --- /dev/null +++ b/drivers/gpu/drm/sprd/megacores_pll.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Unisoc Inc. + */ + +#include <asm/div64.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/regmap.h> +#include <linux/string.h> + +#include "sprd_dsi.h" + +#define L 0 +#define H 1 +#define CLK 0 +#define DATA 1 +#define INFINITY 0xffffffff +#define MIN_OUTPUT_FREQ (100) + +#define AVERAGE(a, b) (min(a, b) + abs((b) - (a)) / 2) + +/* sharkle */ +#define VCO_BAND_LOW 750 +#define VCO_BAND_MID 1100 +#define VCO_BAND_HIGH 1500 +#define PHY_REF_CLK 26000 + +static int dphy_calc_pll_param(struct dphy_pll *pll) +{ + const u32 khz = 1000; + const u32 mhz = 1000000; + const unsigned long long factor = 100; + unsigned long long tmp; + int i; + + pll->potential_fvco = pll->freq / khz; + pll->ref_clk = PHY_REF_CLK / khz; + + for (i = 0; i < 4; ++i) { + if (pll->potential_fvco >= VCO_BAND_LOW && + pll->potential_fvco <= VCO_BAND_HIGH) { + pll->fvco = pll->potential_fvco; + pll->out_sel = BIT(i); + break; + } + pll->potential_fvco <<= 1; + } + if (pll->fvco == 0) + return -EINVAL; + + if (pll->fvco >= VCO_BAND_LOW && pll->fvco <= VCO_BAND_MID) { + /* vco band control */ + pll->vco_band = 0x0; + /* low pass filter control */ + pll->lpf_sel = 1; + } else if (pll->fvco > VCO_BAND_MID && pll->fvco <= VCO_BAND_HIGH) { + pll->vco_band = 0x1; + pll->lpf_sel = 0; + } else { + return -EINVAL; + } + + pll->nint = pll->fvco / pll->ref_clk; + tmp = pll->fvco * factor * mhz; + do_div(tmp, pll->ref_clk); + tmp = tmp - pll->nint * factor * mhz; + tmp *= BIT(20); + do_div(tmp, 100000000); + pll->kint = (u32)tmp; + pll->refin = 3; /* pre-divider bypass */ + pll->sdm_en = true; /* use fraction N PLL */ + pll->fdk_s = 0x1; /* fraction */ + pll->cp_s = 0x0; + pll->det_delay = 0x1; + + return 0; +} + +static void dphy_set_pll_reg(struct dphy_pll *pll, struct regmap *regmap) +{ + u8 reg_val[9] = {0}; + int i; + + u8 reg_addr[] = { + 0x03, 0x04, 0x06, 0x08, 0x09, + 0x0a, 0x0b, 0x0e, 0x0f + }; + + reg_val[0] = 1 | (1 << 1) | (pll->lpf_sel << 2); + reg_val[1] = pll->div | (1 << 3) | (pll->cp_s << 5) | (pll->fdk_s << 7); + reg_val[2] = pll->nint; + reg_val[3] = pll->vco_band | (pll->sdm_en << 1) | (pll->refin << 2); + reg_val[4] = pll->kint >> 12; + reg_val[5] = pll->kint >> 4; + reg_val[6] = pll->out_sel | ((pll->kint << 4) & 0xf); + reg_val[7] = 1 << 4; + reg_val[8] = pll->det_delay; + + for (i = 0; i < sizeof(reg_addr); ++i) { + regmap_write(regmap, reg_addr[i], reg_val[i]); + DRM_DEBUG("%02x: %02x\n", reg_addr[i], reg_val[i]); + } +} + +int dphy_pll_config(struct dsi_context *ctx) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + struct regmap *regmap = ctx->regmap; + struct dphy_pll *pll = &ctx->pll; + int ret; + + pll->freq = dsi->slave->hs_rate; + + /* FREQ = 26M * (NINT + KINT / 2^20) / out_sel */ + ret = dphy_calc_pll_param(pll); + if (ret) { + drm_err(dsi->drm, "failed to calculate dphy pll parameters\n"); + return ret; + } + dphy_set_pll_reg(pll, regmap); + + return 0; +} + +static void dphy_set_timing_reg(struct regmap *regmap, int type, u8 val[]) +{ + switch (type) { + case REQUEST_TIME: + regmap_write(regmap, 0x31, val[CLK]); + regmap_write(regmap, 0x41, val[DATA]); + regmap_write(regmap, 0x51, val[DATA]); + regmap_write(regmap, 0x61, 
val[DATA]); + regmap_write(regmap, 0x71, val[DATA]); + + regmap_write(regmap, 0x90, val[CLK]); + regmap_write(regmap, 0xa0, val[DATA]); + regmap_write(regmap, 0xb0, val[DATA]); + regmap_write(regmap, 0xc0, val[DATA]); + regmap_write(regmap, 0xd0, val[DATA]); + break; + case PREPARE_TIME: + regmap_write(regmap, 0x32, val[CLK]); + regmap_write(regmap, 0x42, val[DATA]); + regmap_write(regmap, 0x52, val[DATA]); + regmap_write(regmap, 0x62, val[DATA]); + regmap_write(regmap, 0x72, val[DATA]); + + regmap_write(regmap, 0x91, val[CLK]); + regmap_write(regmap, 0xa1, val[DATA]); + regmap_write(regmap, 0xb1, val[DATA]); + regmap_write(regmap, 0xc1, val[DATA]); + regmap_write(regmap, 0xd1, val[DATA]); + break; + case ZERO_TIME: + regmap_write(regmap, 0x33, val[CLK]); + regmap_write(regmap, 0x43, val[DATA]); + regmap_write(regmap, 0x53, val[DATA]); + regmap_write(regmap, 0x63, val[DATA]); + regmap_write(regmap, 0x73, val[DATA]); + + regmap_write(regmap, 0x92, val[CLK]); + regmap_write(regmap, 0xa2, val[DATA]); + regmap_write(regmap, 0xb2, val[DATA]); + regmap_write(regmap, 0xc2, val[DATA]); + regmap_write(regmap, 0xd2, val[DATA]); + break; + case TRAIL_TIME: + regmap_write(regmap, 0x34, val[CLK]); + regmap_write(regmap, 0x44, val[DATA]); + regmap_write(regmap, 0x54, val[DATA]); + regmap_write(regmap, 0x64, val[DATA]); + regmap_write(regmap, 0x74, val[DATA]); + + regmap_write(regmap, 0x93, val[CLK]); + regmap_write(regmap, 0xa3, val[DATA]); + regmap_write(regmap, 0xb3, val[DATA]); + regmap_write(regmap, 0xc3, val[DATA]); + regmap_write(regmap, 0xd3, val[DATA]); + break; + case EXIT_TIME: + regmap_write(regmap, 0x36, val[CLK]); + regmap_write(regmap, 0x46, val[DATA]); + regmap_write(regmap, 0x56, val[DATA]); + regmap_write(regmap, 0x66, val[DATA]); + regmap_write(regmap, 0x76, val[DATA]); + + regmap_write(regmap, 0x95, val[CLK]); + regmap_write(regmap, 0xA5, val[DATA]); + regmap_write(regmap, 0xB5, val[DATA]); + regmap_write(regmap, 0xc5, val[DATA]); + regmap_write(regmap, 0xd5, val[DATA]); + break; + case CLKPOST_TIME: + regmap_write(regmap, 0x35, val[CLK]); + regmap_write(regmap, 0x94, val[CLK]); + break; + + /* the following just use default value */ + case SETTLE_TIME: + fallthrough; + case TA_GET: + fallthrough; + case TA_GO: + fallthrough; + case TA_SURE: + fallthrough; + default: + break; + } +} + +void dphy_timing_config(struct dsi_context *ctx) +{ + struct regmap *regmap = ctx->regmap; + struct dphy_pll *pll = &ctx->pll; + const u32 factor = 2; + const u32 scale = 100; + u32 t_ui, t_byteck, t_half_byteck; + u32 range[2], constant; + u8 val[2]; + u32 tmp = 0; + + /* t_ui: 1 ui, byteck: 8 ui, half byteck: 4 ui */ + t_ui = 1000 * scale / (pll->freq / 1000); + t_byteck = t_ui << 3; + t_half_byteck = t_ui << 2; + constant = t_ui << 1; + + /* REQUEST_TIME: HS T-LPX: LP-01 + * For T-LPX, mipi spec defined min value is 50ns, + * but maybe it shouldn't be too small, because BTA, + * LP-10, LP-00, LP-01, all of this is related to T-LPX. 
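+ * Worked example, assuming a 500 Mbps per-lane rate: t_ui = 200 in
+ * 0.01 ns units and t_byteck = 1600, so the 50 ns floor below yields
+ * DIV_ROUND_UP(5000 * 4, 1600) - 2 = 11 for both CLK and DATA lanes.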
+ */ + range[L] = 50 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2; + val[DATA] = val[CLK]; + dphy_set_timing_reg(regmap, REQUEST_TIME, val); + + /* PREPARE_TIME: HS sequence: LP-00 */ + range[L] = 38 * scale; + range[H] = 95 * scale; + tmp = AVERAGE(range[L], range[H]); + val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1; + range[L] = 40 * scale + 4 * t_ui; + range[H] = 85 * scale + 6 * t_ui; + tmp |= AVERAGE(range[L], range[H]) << 16; + val[DATA] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1; + dphy_set_timing_reg(regmap, PREPARE_TIME, val); + + /* ZERO_TIME: HS-ZERO */ + range[L] = 300 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor + (tmp & 0xffff) + - 525 * t_byteck / 100, t_byteck) - 2; + range[L] = 145 * scale + 10 * t_ui; + val[DATA] = DIV_ROUND_UP(range[L] * factor + + ((tmp >> 16) & 0xffff) - 525 * t_byteck / 100, + t_byteck) - 2; + dphy_set_timing_reg(regmap, ZERO_TIME, val); + + /* TRAIL_TIME: HS-TRAIL */ + range[L] = 60 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor - constant, t_half_byteck); + range[L] = max(8 * t_ui, 60 * scale + 4 * t_ui); + val[DATA] = DIV_ROUND_UP(range[L] * 3 / 2 - constant, t_half_byteck) - 2; + dphy_set_timing_reg(regmap, TRAIL_TIME, val); + + /* EXIT_TIME: */ + range[L] = 100 * scale; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2; + val[DATA] = val[CLK]; + dphy_set_timing_reg(regmap, EXIT_TIME, val); + + /* CLKPOST_TIME: */ + range[L] = 60 * scale + 52 * t_ui; + range[H] = INFINITY; + val[CLK] = DIV_ROUND_UP(range[L] * factor, t_byteck) - 2; + val[DATA] = val[CLK]; + dphy_set_timing_reg(regmap, CLKPOST_TIME, val); + + /* SETTLE_TIME: + * This time is used for receiver. So for transmitter, + * it can be ignored. + */ + + /* TA_GO: + * transmitter drives bridge state(LP-00) before releasing control, + * reg 0x1f default value: 0x04, which is good. + */ + + /* TA_SURE: + * After LP-10 state and before bridge state(LP-00), + * reg 0x20 default value: 0x01, which is good. + */ + + /* TA_GET: + * receiver drives Bridge state(LP-00) before releasing control + * reg 0x21 default value: 0x03, which is good. + */ +} diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c new file mode 100644 index 000000000000..06a3414ee43a --- /dev/null +++ b/drivers/gpu/drm/sprd/sprd_dpu.c @@ -0,0 +1,880 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Unisoc Inc. 
+ */ + +#include <linux/component.h> +#include <linux/delay.h> +#include <linux/dma-buf.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_irq.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_cma_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_plane_helper.h> + +#include "sprd_drm.h" +#include "sprd_dpu.h" +#include "sprd_dsi.h" + +/* Global control registers */ +#define REG_DPU_CTRL 0x04 +#define REG_DPU_CFG0 0x08 +#define REG_PANEL_SIZE 0x20 +#define REG_BLEND_SIZE 0x24 +#define REG_BG_COLOR 0x2C + +/* Layer0 control registers */ +#define REG_LAY_BASE_ADDR0 0x30 +#define REG_LAY_BASE_ADDR1 0x34 +#define REG_LAY_BASE_ADDR2 0x38 +#define REG_LAY_CTRL 0x40 +#define REG_LAY_SIZE 0x44 +#define REG_LAY_PITCH 0x48 +#define REG_LAY_POS 0x4C +#define REG_LAY_ALPHA 0x50 +#define REG_LAY_CROP_START 0x5C + +/* Interrupt control registers */ +#define REG_DPU_INT_EN 0x1E0 +#define REG_DPU_INT_CLR 0x1E4 +#define REG_DPU_INT_STS 0x1E8 + +/* DPI control registers */ +#define REG_DPI_CTRL 0x1F0 +#define REG_DPI_H_TIMING 0x1F4 +#define REG_DPI_V_TIMING 0x1F8 + +/* MMU control registers */ +#define REG_MMU_EN 0x800 +#define REG_MMU_VPN_RANGE 0x80C +#define REG_MMU_PPN1 0x83C +#define REG_MMU_RANGE1 0x840 +#define REG_MMU_PPN2 0x844 +#define REG_MMU_RANGE2 0x848 + +/* Global control bits */ +#define BIT_DPU_RUN BIT(0) +#define BIT_DPU_STOP BIT(1) +#define BIT_DPU_REG_UPDATE BIT(2) +#define BIT_DPU_IF_EDPI BIT(0) + +/* Layer control bits */ +#define BIT_DPU_LAY_EN BIT(0) +#define BIT_DPU_LAY_LAYER_ALPHA (0x01 << 2) +#define BIT_DPU_LAY_COMBO_ALPHA (0x02 << 2) +#define BIT_DPU_LAY_FORMAT_YUV422_2PLANE (0x00 << 4) +#define BIT_DPU_LAY_FORMAT_YUV420_2PLANE (0x01 << 4) +#define BIT_DPU_LAY_FORMAT_YUV420_3PLANE (0x02 << 4) +#define BIT_DPU_LAY_FORMAT_ARGB8888 (0x03 << 4) +#define BIT_DPU_LAY_FORMAT_RGB565 (0x04 << 4) +#define BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3 (0x00 << 8) +#define BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0 (0x01 << 8) +#define BIT_DPU_LAY_NO_SWITCH (0x00 << 10) +#define BIT_DPU_LAY_RB_OR_UV_SWITCH (0x01 << 10) +#define BIT_DPU_LAY_MODE_BLEND_NORMAL (0x00 << 16) +#define BIT_DPU_LAY_MODE_BLEND_PREMULT (0x01 << 16) +#define BIT_DPU_LAY_ROTATION_0 (0x00 << 20) +#define BIT_DPU_LAY_ROTATION_90 (0x01 << 20) +#define BIT_DPU_LAY_ROTATION_180 (0x02 << 20) +#define BIT_DPU_LAY_ROTATION_270 (0x03 << 20) +#define BIT_DPU_LAY_ROTATION_0_M (0x04 << 20) +#define BIT_DPU_LAY_ROTATION_90_M (0x05 << 20) +#define BIT_DPU_LAY_ROTATION_180_M (0x06 << 20) +#define BIT_DPU_LAY_ROTATION_270_M (0x07 << 20) + +/* Interrupt control & status bits */ +#define BIT_DPU_INT_DONE BIT(0) +#define BIT_DPU_INT_TE BIT(1) +#define BIT_DPU_INT_ERR BIT(2) +#define BIT_DPU_INT_UPDATE_DONE BIT(4) +#define BIT_DPU_INT_VSYNC BIT(5) + +/* DPI control bits */ +#define BIT_DPU_EDPI_TE_EN BIT(8) +#define BIT_DPU_EDPI_FROM_EXTERNAL_PAD BIT(10) +#define BIT_DPU_DPI_HALT_EN BIT(16) + +static const u32 layer_fmts[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, + DRM_FORMAT_NV16, + DRM_FORMAT_NV61, + DRM_FORMAT_YUV420, + DRM_FORMAT_YVU420, +}; + +struct sprd_plane { + 
struct drm_plane base; +}; + +static int dpu_wait_stop_done(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + int rc; + + if (ctx->stopped) + return 0; + + rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_stop, + msecs_to_jiffies(500)); + ctx->evt_stop = false; + + ctx->stopped = true; + + if (!rc) { + drm_err(dpu->drm, "dpu wait for stop done time out!\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int dpu_wait_update_done(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + int rc; + + ctx->evt_update = false; + + rc = wait_event_interruptible_timeout(ctx->wait_queue, ctx->evt_update, + msecs_to_jiffies(500)); + + if (!rc) { + drm_err(dpu->drm, "dpu wait for reg update done time out!\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static u32 drm_format_to_dpu(struct drm_framebuffer *fb) +{ + u32 format = 0; + + switch (fb->format->format) { + case DRM_FORMAT_BGRA8888: + /* BGRA8888 -> ARGB8888 */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; + format |= BIT_DPU_LAY_FORMAT_ARGB8888; + break; + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + /* RGBA8888 -> ABGR8888 */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; + fallthrough; + case DRM_FORMAT_ABGR8888: + /* RB switch */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + fallthrough; + case DRM_FORMAT_ARGB8888: + format |= BIT_DPU_LAY_FORMAT_ARGB8888; + break; + case DRM_FORMAT_XBGR8888: + /* RB switch */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + fallthrough; + case DRM_FORMAT_XRGB8888: + format |= BIT_DPU_LAY_FORMAT_ARGB8888; + break; + case DRM_FORMAT_BGR565: + /* RB switch */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + fallthrough; + case DRM_FORMAT_RGB565: + format |= BIT_DPU_LAY_FORMAT_RGB565; + break; + case DRM_FORMAT_NV12: + /* 2-Lane: Yuv420 */ + format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_NO_SWITCH; + break; + case DRM_FORMAT_NV21: + /* 2-Lane: Yuv420 */ + format |= BIT_DPU_LAY_FORMAT_YUV420_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + break; + case DRM_FORMAT_NV16: + /* 2-Lane: Yuv422 */ + format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B3B2B1B0; + /* UV endian */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + break; + case DRM_FORMAT_NV61: + /* 2-Lane: Yuv422 */ + format |= BIT_DPU_LAY_FORMAT_YUV422_2PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_NO_SWITCH; + break; + case DRM_FORMAT_YUV420: + format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_NO_SWITCH; + break; + case DRM_FORMAT_YVU420: + format |= BIT_DPU_LAY_FORMAT_YUV420_3PLANE; + /* Y endian */ + format |= BIT_DPU_LAY_DATA_ENDIAN_B0B1B2B3; + /* UV endian */ + format |= BIT_DPU_LAY_RB_OR_UV_SWITCH; + break; + default: + break; + } + + return format; +} + +static u32 drm_rotation_to_dpu(struct drm_plane_state *state) +{ + u32 rotation = 0; + + switch (state->rotation) { + default: + case DRM_MODE_ROTATE_0: + rotation = BIT_DPU_LAY_ROTATION_0; + break; + case DRM_MODE_ROTATE_90: + rotation = BIT_DPU_LAY_ROTATION_90; + break; + case DRM_MODE_ROTATE_180: + rotation = BIT_DPU_LAY_ROTATION_180; + break; + case DRM_MODE_ROTATE_270: + rotation = BIT_DPU_LAY_ROTATION_270; + break; + case DRM_MODE_REFLECT_Y: + rotation = 
BIT_DPU_LAY_ROTATION_180_M; + break; + case (DRM_MODE_REFLECT_Y | DRM_MODE_ROTATE_90): + rotation = BIT_DPU_LAY_ROTATION_90_M; + break; + case DRM_MODE_REFLECT_X: + rotation = BIT_DPU_LAY_ROTATION_0_M; + break; + case (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90): + rotation = BIT_DPU_LAY_ROTATION_270_M; + break; + } + + return rotation; +} + +static u32 drm_blend_to_dpu(struct drm_plane_state *state) +{ + u32 blend = 0; + + switch (state->pixel_blend_mode) { + case DRM_MODE_BLEND_COVERAGE: + /* alpha mode select - combo alpha */ + blend |= BIT_DPU_LAY_COMBO_ALPHA; + /* Normal mode */ + blend |= BIT_DPU_LAY_MODE_BLEND_NORMAL; + break; + case DRM_MODE_BLEND_PREMULTI: + /* alpha mode select - combo alpha */ + blend |= BIT_DPU_LAY_COMBO_ALPHA; + /* Pre-mult mode */ + blend |= BIT_DPU_LAY_MODE_BLEND_PREMULT; + break; + case DRM_MODE_BLEND_PIXEL_NONE: + default: + /* don't do blending, maybe RGBX */ + /* alpha mode select - layer alpha */ + blend |= BIT_DPU_LAY_LAYER_ALPHA; + break; + } + + return blend; +} + +static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state) +{ + struct dpu_context *ctx = &dpu->ctx; + struct drm_gem_cma_object *cma_obj; + struct drm_framebuffer *fb = state->fb; + u32 addr, size, offset, pitch, blend, format, rotation; + u32 src_x = state->src_x >> 16; + u32 src_y = state->src_y >> 16; + u32 src_w = state->src_w >> 16; + u32 src_h = state->src_h >> 16; + u32 dst_x = state->crtc_x; + u32 dst_y = state->crtc_y; + u32 alpha = state->alpha; + u32 index = state->zpos; + int i; + + offset = (dst_x & 0xffff) | (dst_y << 16); + size = (src_w & 0xffff) | (src_h << 16); + + for (i = 0; i < fb->format->num_planes; i++) { + cma_obj = drm_fb_cma_get_gem_obj(fb, i); + addr = cma_obj->paddr + fb->offsets[i]; + + if (i == 0) + layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index); + else if (i == 1) + layer_reg_wr(ctx, REG_LAY_BASE_ADDR1, addr, index); + else + layer_reg_wr(ctx, REG_LAY_BASE_ADDR2, addr, index); + } + + if (fb->format->num_planes == 3) { + /* UV pitch is 1/2 of Y pitch */ + pitch = (fb->pitches[0] / fb->format->cpp[0]) | + (fb->pitches[0] / fb->format->cpp[0] << 15); + } else { + pitch = fb->pitches[0] / fb->format->cpp[0]; + } + + layer_reg_wr(ctx, REG_LAY_POS, offset, index); + layer_reg_wr(ctx, REG_LAY_SIZE, size, index); + layer_reg_wr(ctx, REG_LAY_CROP_START, + src_y << 16 | src_x, index); + layer_reg_wr(ctx, REG_LAY_ALPHA, alpha, index); + layer_reg_wr(ctx, REG_LAY_PITCH, pitch, index); + + format = drm_format_to_dpu(fb); + blend = drm_blend_to_dpu(state); + rotation = drm_rotation_to_dpu(state); + + layer_reg_wr(ctx, REG_LAY_CTRL, BIT_DPU_LAY_EN | + format | + blend | + rotation, + index); +} + +static void sprd_dpu_flip(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + /* + * Make sure the dpu is in stop status. DPU has no shadow + * registers in EDPI mode. So the config registers can only be + * updated in the rising edge of DPU_RUN bit. 
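+ * In DPI mode, by contrast, shadow registers are available: the code
+ * below requests the update with BIT_DPU_REG_UPDATE and waits for it
+ * in dpu_wait_update_done() instead.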
+ */ + if (ctx->if_type == SPRD_DPU_IF_EDPI) + dpu_wait_stop_done(dpu); + + /* update trigger and wait */ + if (ctx->if_type == SPRD_DPU_IF_DPI) { + if (!ctx->stopped) { + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_REG_UPDATE); + dpu_wait_update_done(dpu); + } + + dpu_reg_set(ctx, REG_DPU_INT_EN, BIT_DPU_INT_ERR); + } else if (ctx->if_type == SPRD_DPU_IF_EDPI) { + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN); + + ctx->stopped = false; + } +} + +static void sprd_dpu_init(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + u32 int_mask = 0; + + writel(0x00, ctx->base + REG_BG_COLOR); + writel(0x00, ctx->base + REG_MMU_EN); + writel(0x00, ctx->base + REG_MMU_PPN1); + writel(0xffff, ctx->base + REG_MMU_RANGE1); + writel(0x00, ctx->base + REG_MMU_PPN2); + writel(0xffff, ctx->base + REG_MMU_RANGE2); + writel(0x1ffff, ctx->base + REG_MMU_VPN_RANGE); + + if (ctx->if_type == SPRD_DPU_IF_DPI) { + /* use dpi as interface */ + dpu_reg_clr(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI); + /* disable Halt function for SPRD DSI */ + dpu_reg_clr(ctx, REG_DPI_CTRL, BIT_DPU_DPI_HALT_EN); + /* select te from external pad */ + dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD); + + /* enable dpu update done INT */ + int_mask |= BIT_DPU_INT_UPDATE_DONE; + /* enable dpu done INT */ + int_mask |= BIT_DPU_INT_DONE; + /* enable dpu dpi vsync */ + int_mask |= BIT_DPU_INT_VSYNC; + /* enable dpu TE INT */ + int_mask |= BIT_DPU_INT_TE; + /* enable underflow err INT */ + int_mask |= BIT_DPU_INT_ERR; + } else if (ctx->if_type == SPRD_DPU_IF_EDPI) { + /* use edpi as interface */ + dpu_reg_set(ctx, REG_DPU_CFG0, BIT_DPU_IF_EDPI); + /* use external te */ + dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_FROM_EXTERNAL_PAD); + /* enable te */ + dpu_reg_set(ctx, REG_DPI_CTRL, BIT_DPU_EDPI_TE_EN); + + /* enable stop done INT */ + int_mask |= BIT_DPU_INT_DONE; + /* enable TE INT */ + int_mask |= BIT_DPU_INT_TE; + } + + writel(int_mask, ctx->base + REG_DPU_INT_EN); +} + +static void sprd_dpu_fini(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + writel(0x00, ctx->base + REG_DPU_INT_EN); + writel(0xff, ctx->base + REG_DPU_INT_CLR); +} + +static void sprd_dpi_init(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + u32 reg_val; + u32 size; + + size = (ctx->vm.vactive << 16) | ctx->vm.hactive; + writel(size, ctx->base + REG_PANEL_SIZE); + writel(size, ctx->base + REG_BLEND_SIZE); + + if (ctx->if_type == SPRD_DPU_IF_DPI) { + /* set dpi timing */ + reg_val = ctx->vm.hsync_len << 0 | + ctx->vm.hback_porch << 8 | + ctx->vm.hfront_porch << 20; + writel(reg_val, ctx->base + REG_DPI_H_TIMING); + + reg_val = ctx->vm.vsync_len << 0 | + ctx->vm.vback_porch << 8 | + ctx->vm.vfront_porch << 20; + writel(reg_val, ctx->base + REG_DPI_V_TIMING); + } +} + +void sprd_dpu_run(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_RUN); + + ctx->stopped = false; +} + +void sprd_dpu_stop(struct sprd_dpu *dpu) +{ + struct dpu_context *ctx = &dpu->ctx; + + if (ctx->if_type == SPRD_DPU_IF_DPI) + dpu_reg_set(ctx, REG_DPU_CTRL, BIT_DPU_STOP); + + dpu_wait_stop_done(dpu); +} + +static int sprd_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_crtc_state *crtc_state; + u32 fmt; + + if (!plane_state->fb || !plane_state->crtc) + return 0; + + fmt = drm_format_to_dpu(plane_state->fb); + if (!fmt) + return -EINVAL; + + crtc_state = 
drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + return drm_atomic_helper_check_plane_state(plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); +} + +static void sprd_plane_atomic_update(struct drm_plane *drm_plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + drm_plane); + struct sprd_dpu *dpu = to_sprd_crtc(new_state->crtc); + + /* start configure dpu layers */ + sprd_dpu_layer(dpu, new_state); +} + +static void sprd_plane_atomic_disable(struct drm_plane *drm_plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + drm_plane); + struct sprd_dpu *dpu = to_sprd_crtc(old_state->crtc); + + layer_reg_wr(&dpu->ctx, REG_LAY_CTRL, 0x00, old_state->zpos); +} + +static void sprd_plane_create_properties(struct sprd_plane *plane, int index) +{ + unsigned int supported_modes = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE); + + /* create rotation property */ + drm_plane_create_rotation_property(&plane->base, + DRM_MODE_ROTATE_0, + DRM_MODE_ROTATE_MASK | + DRM_MODE_REFLECT_MASK); + + /* create alpha property */ + drm_plane_create_alpha_property(&plane->base); + + /* create blend mode property */ + drm_plane_create_blend_mode_property(&plane->base, supported_modes); + + /* create zpos property */ + drm_plane_create_zpos_immutable_property(&plane->base, index); +} + +static const struct drm_plane_helper_funcs sprd_plane_helper_funcs = { + .atomic_check = sprd_plane_atomic_check, + .atomic_update = sprd_plane_atomic_update, + .atomic_disable = sprd_plane_atomic_disable, +}; + +static const struct drm_plane_funcs sprd_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static struct sprd_plane *sprd_planes_init(struct drm_device *drm) +{ + struct sprd_plane *plane, *primary; + enum drm_plane_type plane_type; + int i; + + for (i = 0; i < 6; i++) { + plane_type = (i == 0) ? 
DRM_PLANE_TYPE_PRIMARY : + DRM_PLANE_TYPE_OVERLAY; + + plane = drmm_universal_plane_alloc(drm, struct sprd_plane, base, + 1, &sprd_plane_funcs, + layer_fmts, ARRAY_SIZE(layer_fmts), + NULL, plane_type, NULL); + if (IS_ERR(plane)) { + drm_err(drm, "failed to init drm plane: %d\n", i); + return plane; + } + + drm_plane_helper_add(&plane->base, &sprd_plane_helper_funcs); + + sprd_plane_create_properties(plane, i); + + if (i == 0) + primary = plane; + } + + return primary; +} + +static void sprd_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_encoder *encoder; + struct sprd_dsi *dsi; + + drm_display_mode_to_videomode(mode, &dpu->ctx.vm); + + drm_for_each_encoder_mask(encoder, crtc->dev, + crtc->state->encoder_mask) { + dsi = encoder_to_dsi(encoder); + + if (dsi->slave->mode_flags & MIPI_DSI_MODE_VIDEO) + dpu->ctx.if_type = SPRD_DPU_IF_DPI; + else + dpu->ctx.if_type = SPRD_DPU_IF_EDPI; + } + + sprd_dpi_init(dpu); +} + +static void sprd_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + + sprd_dpu_init(dpu); + + drm_crtc_vblank_on(&dpu->base); +} + +static void sprd_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + struct drm_device *drm = dpu->base.dev; + + drm_crtc_vblank_off(&dpu->base); + + sprd_dpu_fini(dpu); + + spin_lock_irq(&drm->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&drm->event_lock); +} + +static void sprd_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) + +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + struct drm_device *drm = dpu->base.dev; + + sprd_dpu_flip(dpu); + + spin_lock_irq(&drm->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&drm->event_lock); +} + +static int sprd_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + + dpu_reg_set(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC); + + return 0; +} + +static void sprd_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct sprd_dpu *dpu = to_sprd_crtc(crtc); + + dpu_reg_clr(&dpu->ctx, REG_DPU_INT_EN, BIT_DPU_INT_VSYNC); +} + +static const struct drm_crtc_helper_funcs sprd_crtc_helper_funcs = { + .mode_set_nofb = sprd_crtc_mode_set_nofb, + .atomic_flush = sprd_crtc_atomic_flush, + .atomic_enable = sprd_crtc_atomic_enable, + .atomic_disable = sprd_crtc_atomic_disable, +}; + +static const struct drm_crtc_funcs sprd_crtc_funcs = { + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = sprd_crtc_enable_vblank, + .disable_vblank = sprd_crtc_disable_vblank, +}; + +static struct sprd_dpu *sprd_crtc_init(struct drm_device *drm, + struct drm_plane *primary, struct device *dev) +{ + struct device_node *port; + struct sprd_dpu *dpu; + + dpu = drmm_crtc_alloc_with_planes(drm, struct sprd_dpu, base, + primary, NULL, + &sprd_crtc_funcs, NULL); + if (IS_ERR(dpu)) { + drm_err(drm, "failed to init crtc\n"); + return dpu; + } + drm_crtc_helper_add(&dpu->base, 
&sprd_crtc_helper_funcs);
+
+	/*
+	 * set the crtc port so that the drm_of_find_possible_crtcs() call works
+	 */
+	port = of_graph_get_port_by_id(dev->of_node, 0);
+	if (!port) {
+		drm_err(drm, "failed to find crtc output port for %s\n",
+			dev->of_node->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+	dpu->base.port = port;
+	of_node_put(port);
+
+	return dpu;
+}
+
+static irqreturn_t sprd_dpu_isr(int irq, void *data)
+{
+	struct sprd_dpu *dpu = data;
+	struct dpu_context *ctx = &dpu->ctx;
+	u32 reg_val, int_mask = 0;
+
+	reg_val = readl(ctx->base + REG_DPU_INT_STS);
+
+	/* disable err interrupt */
+	if (reg_val & BIT_DPU_INT_ERR) {
+		int_mask |= BIT_DPU_INT_ERR;
+		drm_warn(dpu->drm, "Warning: dpu underflow!\n");
+	}
+
+	/* dpu update done isr */
+	if (reg_val & BIT_DPU_INT_UPDATE_DONE) {
+		ctx->evt_update = true;
+		wake_up_interruptible_all(&ctx->wait_queue);
+	}
+
+	/* dpu stop done isr */
+	if (reg_val & BIT_DPU_INT_DONE) {
+		ctx->evt_stop = true;
+		wake_up_interruptible_all(&ctx->wait_queue);
+	}
+
+	if (reg_val & BIT_DPU_INT_VSYNC)
+		drm_crtc_handle_vblank(&dpu->base);
+
+	writel(reg_val, ctx->base + REG_DPU_INT_CLR);
+	dpu_reg_clr(ctx, REG_DPU_INT_EN, int_mask);
+
+	return IRQ_HANDLED;
+}
+
+static int sprd_dpu_context_init(struct sprd_dpu *dpu,
+				 struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dpu_context *ctx = &dpu->ctx;
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!ctx->base) {
+		dev_err(dev, "failed to map dpu registers\n");
+		return -EFAULT;
+	}
+
+	ctx->irq = platform_get_irq(pdev, 0);
+	if (ctx->irq < 0) {
+		dev_err(dev, "failed to get dpu irq\n");
+		return ctx->irq;
+	}
+
+	/* disable and clear interrupts before registering the dpu IRQ. */
+	writel(0x00, ctx->base + REG_DPU_INT_EN);
+	writel(0xff, ctx->base + REG_DPU_INT_CLR);
+
+	ret = devm_request_irq(dev, ctx->irq, sprd_dpu_isr,
+			       IRQF_TRIGGER_NONE, "DPU", dpu);
+	if (ret) {
+		dev_err(dev, "failed to register dpu irq handler\n");
+		return ret;
+	}
+
+	init_waitqueue_head(&ctx->wait_queue);
+
+	return 0;
+}
+
+static int sprd_dpu_bind(struct device *dev, struct device *master, void *data)
+{
+	struct drm_device *drm = data;
+	struct sprd_dpu *dpu;
+	struct sprd_plane *plane;
+	int ret;
+
+	plane = sprd_planes_init(drm);
+	if (IS_ERR(plane))
+		return PTR_ERR(plane);
+
+	dpu = sprd_crtc_init(drm, &plane->base, dev);
+	if (IS_ERR(dpu))
+		return PTR_ERR(dpu);
+
+	dpu->drm = drm;
+	dev_set_drvdata(dev, dpu);
+
+	ret = sprd_dpu_context_init(dpu, dev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct component_ops dpu_component_ops = {
+	.bind = sprd_dpu_bind,
+};
+
+static const struct of_device_id dpu_match_table[] = {
+	{ .compatible = "sprd,sharkl3-dpu" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, dpu_match_table);
+
+static int sprd_dpu_probe(struct platform_device *pdev)
+{
+	return component_add(&pdev->dev, &dpu_component_ops);
+}
+
+static int sprd_dpu_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &dpu_component_ops);
+
+	return 0;
+}
+
+struct platform_driver sprd_dpu_driver = {
+	.probe = sprd_dpu_probe,
+	.remove = sprd_dpu_remove,
+	.driver = {
+		.name = "sprd-dpu-drv",
+		.of_match_table = dpu_match_table,
+	},
+};
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc Display Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.h b/drivers/gpu/drm/sprd/sprd_dpu.h
new file mode 100644
index 000000000000..157a78f24dc1
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dpu.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef __SPRD_DPU_H__
+#define __SPRD_DPU_H__
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <uapi/drm/drm_mode.h>
+
+/* DPU Layer registers offset */
+#define DPU_LAY_REG_OFFSET	0x30
+
+enum {
+	SPRD_DPU_IF_DPI,
+	SPRD_DPU_IF_EDPI,
+	SPRD_DPU_IF_LIMIT
+};
+
+/**
+ * Sprd DPU context structure
+ *
+ * @base: DPU controller base address
+ * @irq: IRQ number to install the handler for
+ * @if_type: The type of DPI interface, default is DPI mode.
+ * @vm: videomode structure to use for DPU and DPI initialization
+ * @stopped: indicates whether the DPU is stopped
+ * @wait_queue: wait queue, used to wait for the DPU shadow register update
+ *		done and DPU stop done interrupt signals.
+ * @evt_update: wait queue condition for the DPU shadow register update
+ * @evt_stop: wait queue condition for the DPU stop
+ */
+struct dpu_context {
+	void __iomem *base;
+	int irq;
+	u8 if_type;
+	struct videomode vm;
+	bool stopped;
+	wait_queue_head_t wait_queue;
+	bool evt_update;
+	bool evt_stop;
+};
+
+/**
+ * Sprd DPU device structure
+ *
+ * @base: crtc object
+ * @drm: A pointer to the drm device
+ * @ctx: DPU's implementation specific context object
+ */
+struct sprd_dpu {
+	struct drm_crtc base;
+	struct drm_device *drm;
+	struct dpu_context ctx;
+};
+
+static inline struct sprd_dpu *to_sprd_crtc(struct drm_crtc *crtc)
+{
+	return container_of(crtc, struct sprd_dpu, base);
+}
+
+static inline void
+dpu_reg_set(struct dpu_context *ctx, u32 offset, u32 set_bits)
+{
+	u32 bits = readl_relaxed(ctx->base + offset);
+
+	writel(bits | set_bits, ctx->base + offset);
+}
+
+static inline void
+dpu_reg_clr(struct dpu_context *ctx, u32 offset, u32 clr_bits)
+{
+	u32 bits = readl_relaxed(ctx->base + offset);
+
+	writel(bits & ~clr_bits, ctx->base + offset);
+}
+
+static inline u32
+layer_reg_rd(struct dpu_context *ctx, u32 offset, int index)
+{
+	u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+	return readl(ctx->base + layer_offset);
+}
+
+static inline void
+layer_reg_wr(struct dpu_context *ctx, u32 offset, u32 cfg_bits, int index)
+{
+	u32 layer_offset = offset + index * DPU_LAY_REG_OFFSET;
+
+	writel(cfg_bits, ctx->base + layer_offset);
+}
+
+void sprd_dpu_run(struct sprd_dpu *dpu);
+void sprd_dpu_stop(struct sprd_dpu *dpu);
+
+#endif
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
new file mode 100644
index 000000000000..a077e2d4d721
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "sprd_drm.h"
+
+#define DRIVER_NAME	"sprd"
+#define DRIVER_DESC	"Spreadtrum SoCs' DRM Driver"
+#define DRIVER_DATE	"20200201"
+#define DRIVER_MAJOR	1
+#define DRIVER_MINOR	0
+
+static const struct drm_mode_config_helper_funcs sprd_drm_mode_config_helper = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
+static const struct drm_mode_config_funcs sprd_drm_mode_config_funcs = {
+	.fb_create = drm_gem_fb_create,
+	.atomic_check = drm_atomic_helper_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static void sprd_drm_mode_config_init(struct drm_device *drm)
+{
+	drm->mode_config.min_width = 0;
+	drm->mode_config.min_height = 0;
+	drm->mode_config.max_width = 8192;
+	drm->mode_config.max_height = 8192;
+	drm->mode_config.allow_fb_modifiers = true;
+
+	drm->mode_config.funcs = &sprd_drm_mode_config_funcs;
+	drm->mode_config.helper_private = &sprd_drm_mode_config_helper;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(sprd_drm_fops);
+
+static struct drm_driver sprd_drm_drv = {
+	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+	.fops = &sprd_drm_fops,
+
+	/* GEM Operations */
+	DRM_GEM_CMA_DRIVER_OPS,
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+};
+
+static int sprd_drm_bind(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *drm;
+	struct sprd_drm *sprd;
+	int ret;
+
+	sprd = devm_drm_dev_alloc(dev, &sprd_drm_drv, struct sprd_drm, drm);
+	if (IS_ERR(sprd))
+		return PTR_ERR(sprd);
+
+	drm = &sprd->drm;
+	platform_set_drvdata(pdev, drm);
+
+	ret = drmm_mode_config_init(drm);
+	if (ret)
+		return ret;
+
+	sprd_drm_mode_config_init(drm);
+
+	/* bind and init sub drivers */
+	ret = component_bind_all(drm->dev, drm);
+	if (ret) {
+		drm_err(drm, "failed to bind all components\n");
+		return ret;
+	}
+
+	/* vblank init */
+	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	if (ret) {
+		drm_err(drm, "failed to initialize vblank\n");
+		goto err_unbind_all;
+	}
+
+	/* reset all the states of crtc/plane/encoder/connector */
+	drm_mode_config_reset(drm);
+
+	/* init kms poll for handling hpd */
+	drm_kms_helper_poll_init(drm);
+
+	ret = drm_dev_register(drm, 0);
+	if (ret < 0)
+		goto err_kms_helper_poll_fini;
+
+	return 0;
+
+err_kms_helper_poll_fini:
+	drm_kms_helper_poll_fini(drm);
+err_unbind_all:
+	component_unbind_all(drm->dev, drm);
+	return ret;
+}
+
+static void sprd_drm_unbind(struct device *dev)
+{
+	struct drm_device *drm = dev_get_drvdata(dev);
+
+	drm_dev_unregister(drm);
+
+	drm_kms_helper_poll_fini(drm);
+
+	component_unbind_all(drm->dev, drm);
+}
+
+static const struct component_master_ops drm_component_ops = {
+	.bind = sprd_drm_bind,
+	.unbind = sprd_drm_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+static int sprd_drm_probe(struct platform_device *pdev)
+{
+	return drm_of_component_probe(&pdev->dev, compare_of, &drm_component_ops);
+}
+
+static int sprd_drm_remove(struct platform_device *pdev)
+{
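+	/* removing the master in turn unbinds all matched sub-components */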
+	component_master_del(&pdev->dev, &drm_component_ops);
+	return 0;
+}
+
+static void sprd_drm_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *drm = platform_get_drvdata(pdev);
+
+	if (!drm) {
+		dev_warn(&pdev->dev, "drm device is not available, no shutdown\n");
+		return;
+	}
+
+	drm_atomic_helper_shutdown(drm);
+}
+
+static const struct of_device_id drm_match_table[] = {
+	{ .compatible = "sprd,display-subsystem", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, drm_match_table);
+
+static struct platform_driver sprd_drm_driver = {
+	.probe = sprd_drm_probe,
+	.remove = sprd_drm_remove,
+	.shutdown = sprd_drm_shutdown,
+	.driver = {
+		.name = "sprd-drm-drv",
+		.of_match_table = drm_match_table,
+	},
+};
+
+static struct platform_driver *sprd_drm_drivers[] = {
+	&sprd_drm_driver,
+	&sprd_dpu_driver,
+	&sprd_dsi_driver,
+};
+
+static int __init sprd_drm_init(void)
+{
+	return platform_register_drivers(sprd_drm_drivers,
+					 ARRAY_SIZE(sprd_drm_drivers));
+}
+
+static void __exit sprd_drm_exit(void)
+{
+	platform_unregister_drivers(sprd_drm_drivers,
+				    ARRAY_SIZE(sprd_drm_drivers));
+}
+
+module_init(sprd_drm_init);
+module_exit(sprd_drm_exit);
+
+MODULE_AUTHOR("Leon He <leon.he@unisoc.com>");
+MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc DRM KMS Master Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/sprd/sprd_drm.h b/drivers/gpu/drm/sprd/sprd_drm.h
new file mode 100644
index 000000000000..95d1b972f333
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_drm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */
+
+#ifndef _SPRD_DRM_H_
+#define _SPRD_DRM_H_
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+
+struct sprd_drm {
+	struct drm_device drm;
+};
+
+extern struct platform_driver sprd_dpu_driver;
+extern struct platform_driver sprd_dsi_driver;
+
+#endif /* _SPRD_DRM_H_ */
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
new file mode 100644
index 000000000000..911b3cddc264
--- /dev/null
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -0,0 +1,1073 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Unisoc Inc.
+ */ + +#include <linux/component.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_graph.h> +#include <video/mipi_display.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_of.h> +#include <drm/drm_probe_helper.h> + +#include "sprd_drm.h" +#include "sprd_dpu.h" +#include "sprd_dsi.h" + +#define SOFT_RESET 0x04 +#define MASK_PROTOCOL_INT 0x0C +#define MASK_INTERNAL_INT 0x14 +#define DSI_MODE_CFG 0x18 + +#define VIRTUAL_CHANNEL_ID 0x1C +#define GEN_RX_VCID GENMASK(1, 0) +#define VIDEO_PKT_VCID GENMASK(3, 2) + +#define DPI_VIDEO_FORMAT 0x20 +#define DPI_VIDEO_MODE_FORMAT GENMASK(5, 0) +#define LOOSELY18_EN BIT(6) + +#define VIDEO_PKT_CONFIG 0x24 +#define VIDEO_PKT_SIZE GENMASK(15, 0) +#define VIDEO_LINE_CHUNK_NUM GENMASK(31, 16) + +#define VIDEO_LINE_HBLK_TIME 0x28 +#define VIDEO_LINE_HBP_TIME GENMASK(15, 0) +#define VIDEO_LINE_HSA_TIME GENMASK(31, 16) + +#define VIDEO_LINE_TIME 0x2C + +#define VIDEO_VBLK_LINES 0x30 +#define VFP_LINES GENMASK(9, 0) +#define VBP_LINES GENMASK(19, 10) +#define VSA_LINES GENMASK(29, 20) + +#define VIDEO_VACTIVE_LINES 0x34 + +#define VID_MODE_CFG 0x38 +#define VID_MODE_TYPE GENMASK(1, 0) +#define LP_VSA_EN BIT(8) +#define LP_VBP_EN BIT(9) +#define LP_VFP_EN BIT(10) +#define LP_VACT_EN BIT(11) +#define LP_HBP_EN BIT(12) +#define LP_HFP_EN BIT(13) +#define FRAME_BTA_ACK_EN BIT(14) + +#define TIMEOUT_CNT_CLK_CONFIG 0x40 +#define HTX_TO_CONFIG 0x44 +#define LRX_H_TO_CONFIG 0x48 + +#define TX_ESC_CLK_CONFIG 0x5C + +#define CMD_MODE_CFG 0x68 +#define TEAR_FX_EN BIT(0) + +#define GEN_HDR 0x6C +#define GEN_DT GENMASK(5, 0) +#define GEN_VC GENMASK(7, 6) + +#define GEN_PLD_DATA 0x70 + +#define PHY_CLK_LANE_LP_CTRL 0x74 +#define PHY_CLKLANE_TX_REQ_HS BIT(0) +#define AUTO_CLKLANE_CTRL_EN BIT(1) + +#define PHY_INTERFACE_CTRL 0x78 +#define RF_PHY_SHUTDOWN BIT(0) +#define RF_PHY_RESET_N BIT(1) +#define RF_PHY_CLK_EN BIT(2) + +#define CMD_MODE_STATUS 0x98 +#define GEN_CMD_RDATA_FIFO_EMPTY BIT(1) +#define GEN_CMD_WDATA_FIFO_EMPTY BIT(3) +#define GEN_CMD_CMD_FIFO_EMPTY BIT(5) +#define GEN_CMD_RDCMD_DONE BIT(7) + +#define PHY_STATUS 0x9C +#define PHY_LOCK BIT(1) + +#define PHY_MIN_STOP_TIME 0xA0 +#define PHY_LANE_NUM_CONFIG 0xA4 + +#define PHY_CLKLANE_TIME_CONFIG 0xA8 +#define PHY_CLKLANE_LP_TO_HS_TIME GENMASK(15, 0) +#define PHY_CLKLANE_HS_TO_LP_TIME GENMASK(31, 16) + +#define PHY_DATALANE_TIME_CONFIG 0xAC +#define PHY_DATALANE_LP_TO_HS_TIME GENMASK(15, 0) +#define PHY_DATALANE_HS_TO_LP_TIME GENMASK(31, 16) + +#define MAX_READ_TIME 0xB0 + +#define RX_PKT_CHECK_CONFIG 0xB4 +#define RX_PKT_ECC_EN BIT(0) +#define RX_PKT_CRC_EN BIT(1) + +#define TA_EN 0xB8 + +#define EOTP_EN 0xBC +#define TX_EOTP_EN BIT(0) +#define RX_EOTP_EN BIT(1) + +#define VIDEO_NULLPKT_SIZE 0xC0 +#define DCS_WM_PKT_SIZE 0xC4 + +#define VIDEO_SIG_DELAY_CONFIG 0xD0 +#define VIDEO_SIG_DELAY GENMASK(23, 0) + +#define PHY_TST_CTRL0 0xF0 +#define PHY_TESTCLR BIT(0) +#define PHY_TESTCLK BIT(1) + +#define PHY_TST_CTRL1 0xF4 +#define PHY_TESTDIN GENMASK(7, 0) +#define PHY_TESTDOUT GENMASK(15, 8) +#define PHY_TESTEN BIT(16) + +#define host_to_dsi(host) \ + container_of(host, struct sprd_dsi, host) + +static inline u32 +dsi_reg_rd(struct dsi_context *ctx, u32 offset, u32 mask, + u32 shift) +{ + return (readl(ctx->base + offset) & mask) >> shift; +} + +static inline void +dsi_reg_wr(struct dsi_context *ctx, u32 offset, u32 mask, + u32 shift, u32 val) +{ + u32 ret; + 
+ ret = readl(ctx->base + offset); + ret &= ~mask; + ret |= (val << shift) & mask; + writel(ret, ctx->base + offset); +} + +static inline void +dsi_reg_up(struct dsi_context *ctx, u32 offset, u32 mask, + u32 val) +{ + u32 ret = readl(ctx->base + offset); + + writel((ret & ~mask) | (val & mask), ctx->base + offset); +} + +static int regmap_tst_io_write(void *context, u32 reg, u32 val) +{ + struct sprd_dsi *dsi = context; + struct dsi_context *ctx = &dsi->ctx; + + if (val > 0xff || reg > 0xff) + return -EINVAL; + + drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, val); + + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); + dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); + dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, val); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); + + return 0; +} + +static int regmap_tst_io_read(void *context, u32 reg, u32 *val) +{ + struct sprd_dsi *dsi = context; + struct dsi_context *ctx = &dsi->ctx; + int ret; + + if (reg > 0xff) + return -EINVAL; + + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); + dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); + dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); + dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); + + udelay(1); + + ret = dsi_reg_rd(ctx, PHY_TST_CTRL1, PHY_TESTDOUT, 8); + if (ret < 0) + return ret; + + *val = ret; + + drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, *val); + return 0; +} + +static struct regmap_bus regmap_tst_io = { + .reg_write = regmap_tst_io_write, + .reg_read = regmap_tst_io_read, +}; + +static const struct regmap_config byte_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int dphy_wait_pll_locked(struct dsi_context *ctx) +{ + struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); + int i; + + for (i = 0; i < 50000; i++) { + if (dsi_reg_rd(ctx, PHY_STATUS, PHY_LOCK, 1)) + return 0; + udelay(3); + } + + drm_err(dsi->drm, "dphy pll can not be locked\n"); + return -ETIMEDOUT; +} + +static int dsi_wait_tx_payload_fifo_empty(struct dsi_context *ctx) +{ + int i; + + for (i = 0; i < 5000; i++) { + if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_WDATA_FIFO_EMPTY, 3)) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static int dsi_wait_tx_cmd_fifo_empty(struct dsi_context *ctx) +{ + int i; + + for (i = 0; i < 5000; i++) { + if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5)) + return 0; + udelay(1); + } + + return -ETIMEDOUT; +} + +static int dsi_wait_rd_resp_completed(struct dsi_context *ctx) +{ + int i; + + for (i = 0; i < 10000; i++) { + if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDCMD_DONE, 7)) + return 0; + udelay(10); + } + + return -ETIMEDOUT; +} + +static u16 calc_bytes_per_pixel_x100(int coding) +{ + u16 bpp_x100; + + switch (coding) { + case COLOR_CODE_16BIT_CONFIG1: + case COLOR_CODE_16BIT_CONFIG2: + case COLOR_CODE_16BIT_CONFIG3: + bpp_x100 = 200; + break; + case COLOR_CODE_18BIT_CONFIG1: + case COLOR_CODE_18BIT_CONFIG2: + bpp_x100 = 225; + break; + case COLOR_CODE_24BIT: + bpp_x100 = 300; + break; + case COLOR_CODE_COMPRESSTION: + bpp_x100 = 100; + break; + case COLOR_CODE_20BIT_YCC422_LOOSELY: + bpp_x100 = 250; + break; + case COLOR_CODE_24BIT_YCC422: + bpp_x100 = 300; + break; + case COLOR_CODE_16BIT_YCC422: + bpp_x100 = 200; + break; + 
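+	/*
+	 * bpp_x100 is bytes-per-pixel scaled by 100 so that fractional
+	 * packings survive integer math: e.g. 18-bit pixels are 2.25 bytes
+	 * each, returned as 225. Callers multiply first and divide by 100
+	 * last, as in bytes_per_chunk = hactive * bpp_x100 / 100.
+	 */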
+	case COLOR_CODE_30BIT:
+		bpp_x100 = 375;
+		break;
+	case COLOR_CODE_36BIT:
+		bpp_x100 = 450;
+		break;
+	case COLOR_CODE_12BIT_YCC420:
+		bpp_x100 = 150;
+		break;
+	default:
+		DRM_ERROR("invalid color coding\n");
+		bpp_x100 = 0;
+		break;
+	}
+
+	return bpp_x100;
+}
+
+static u8 calc_video_size_step(int coding)
+{
+	switch (coding) {
+	case COLOR_CODE_16BIT_CONFIG1:
+	case COLOR_CODE_16BIT_CONFIG2:
+	case COLOR_CODE_16BIT_CONFIG3:
+	case COLOR_CODE_18BIT_CONFIG1:
+	case COLOR_CODE_18BIT_CONFIG2:
+	case COLOR_CODE_24BIT:
+	case COLOR_CODE_COMPRESSTION:
+		return 1;
+	case COLOR_CODE_20BIT_YCC422_LOOSELY:
+	case COLOR_CODE_24BIT_YCC422:
+	case COLOR_CODE_16BIT_YCC422:
+	case COLOR_CODE_30BIT:
+	case COLOR_CODE_36BIT:
+	case COLOR_CODE_12BIT_YCC420:
+		return 2;
+	default:
+		DRM_ERROR("invalid color coding\n");
+		return 0;
+	}
+}
+
+static u16 round_video_size(int coding, u16 video_size)
+{
+	switch (coding) {
+	case COLOR_CODE_16BIT_YCC422:
+	case COLOR_CODE_24BIT_YCC422:
+	case COLOR_CODE_20BIT_YCC422_LOOSELY:
+	case COLOR_CODE_12BIT_YCC420:
+		/* round up active H pixels to a multiple of 2 */
+		if ((video_size % 2) != 0)
+			video_size += 1;
+		break;
+	default:
+		break;
+	}
+
+	return video_size;
+}
+
+#define SPRD_MIPI_DSI_FMT_DSC 0xff
+static u32 fmt_to_coding(u32 fmt)
+{
+	switch (fmt) {
+	case MIPI_DSI_FMT_RGB565:
+		return COLOR_CODE_16BIT_CONFIG1;
+	case MIPI_DSI_FMT_RGB666:
+	case MIPI_DSI_FMT_RGB666_PACKED:
+		return COLOR_CODE_18BIT_CONFIG1;
+	case MIPI_DSI_FMT_RGB888:
+		return COLOR_CODE_24BIT;
+	case SPRD_MIPI_DSI_FMT_DSC:
+		return COLOR_CODE_COMPRESSTION;
+	default:
+		DRM_ERROR("Unsupported format (%d)\n", fmt);
+		return COLOR_CODE_24BIT;
+	}
+}
+
+#define ns_to_cycle(ns, byte_clk) \
+	DIV_ROUND_UP((ns) * (byte_clk), 1000000)
+
+static void sprd_dsi_init(struct dsi_context *ctx)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	u32 byte_clk = dsi->slave->hs_rate / 8;
+	u16 data_hs2lp, data_lp2hs, clk_hs2lp, clk_lp2hs;
+	u16 max_rd_time;
+	int div;
+
+	writel(0, ctx->base + SOFT_RESET);
+	writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT);
+	writel(0xffffffff, ctx->base + MASK_INTERNAL_INT);
+	writel(1, ctx->base + DSI_MODE_CFG);
+	dsi_reg_up(ctx, EOTP_EN, RX_EOTP_EN, 0);
+	dsi_reg_up(ctx, EOTP_EN, TX_EOTP_EN, 0);
+	dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_ECC_EN, RX_PKT_ECC_EN);
+	dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_CRC_EN, RX_PKT_CRC_EN);
+	writel(1, ctx->base + TA_EN);
+	dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, VIDEO_PKT_VCID, 0);
+	dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, GEN_RX_VCID, 0);
+
+	div = DIV_ROUND_UP(byte_clk, dsi->slave->lp_rate);
+	writel(div, ctx->base + TX_ESC_CLK_CONFIG);
+
+	max_rd_time = ns_to_cycle(ctx->max_rd_time, byte_clk);
+	writel(max_rd_time, ctx->base + MAX_READ_TIME);
+
+	data_hs2lp = ns_to_cycle(ctx->data_hs2lp, byte_clk);
+	data_lp2hs = ns_to_cycle(ctx->data_lp2hs, byte_clk);
+	clk_hs2lp = ns_to_cycle(ctx->clk_hs2lp, byte_clk);
+	clk_lp2hs = ns_to_cycle(ctx->clk_lp2hs, byte_clk);
+	dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG,
+		   PHY_DATALANE_HS_TO_LP_TIME, 16, data_hs2lp);
+	dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG,
+		   PHY_DATALANE_LP_TO_HS_TIME, 0, data_lp2hs);
+	dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG,
+		   PHY_CLKLANE_HS_TO_LP_TIME, 16, clk_hs2lp);
+	dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG,
+		   PHY_CLKLANE_LP_TO_HS_TIME, 0, clk_lp2hs);
+
+	writel(1, ctx->base + SOFT_RESET);
+}
+
+/*
+ * Free up resources and shut down the host controller and PHY.
+ */
+static void sprd_dsi_fini(struct dsi_context *ctx)
+{
+	writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT);
+	writel(0xffffffff, ctx->base + MASK_INTERNAL_INT);
+	writel(0, ctx->base + SOFT_RESET);
+}
+
+/*
+ * If not in burst mode, compute the video and null packet sizes as needed.
+ * Configure timers for the data lanes and/or clock lane to return to LP
+ * when the bandwidth is not filled by data.
+ */
+static int sprd_dsi_dpi_video(struct dsi_context *ctx)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	struct videomode *vm = &ctx->vm;
+	u32 byte_clk = dsi->slave->hs_rate / 8;
+	u16 bpp_x100;
+	u16 video_size;
+	u32 ratio_x1000;
+	u16 null_pkt_size = 0;
+	u8 video_size_step;
+	u32 hs_to;
+	u32 total_bytes;
+	u32 bytes_per_chunk;
+	u32 chunks = 0;
+	u32 bytes_left = 0;
+	u32 chunk_overhead;
+	const u8 pkt_header = 6;
+	u8 coding;
+	int div;
+	u16 hline;
+	u16 byte_cycle;
+
+	coding = fmt_to_coding(dsi->slave->format);
+	video_size = round_video_size(coding, vm->hactive);
+	bpp_x100 = calc_bytes_per_pixel_x100(coding);
+	video_size_step = calc_video_size_step(coding);
+	ratio_x1000 = byte_clk * 1000 / (vm->pixelclock / 1000);
+	hline = vm->hactive + vm->hsync_len + vm->hfront_porch +
+		vm->hback_porch;
+
+	writel(0, ctx->base + SOFT_RESET);
+	dsi_reg_wr(ctx, VID_MODE_CFG, FRAME_BTA_ACK_EN, 15, ctx->frame_ack_en);
+	dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding);
+	dsi_reg_wr(ctx, VID_MODE_CFG, VID_MODE_TYPE, 0, ctx->burst_mode);
+	byte_cycle = 95 * hline * ratio_x1000 / 100000;
+	dsi_reg_wr(ctx, VIDEO_SIG_DELAY_CONFIG, VIDEO_SIG_DELAY, 0, byte_cycle);
+	byte_cycle = hline * ratio_x1000 / 1000;
+	writel(byte_cycle, ctx->base + VIDEO_LINE_TIME);
+	byte_cycle = vm->hsync_len * ratio_x1000 / 1000;
+	dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HSA_TIME, 16, byte_cycle);
+	byte_cycle = vm->hback_porch * ratio_x1000 / 1000;
+	dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HBP_TIME, 0, byte_cycle);
+	writel(vm->vactive, ctx->base + VIDEO_VACTIVE_LINES);
+	dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VFP_LINES, 0, vm->vfront_porch);
+	dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VBP_LINES, 10, vm->vback_porch);
+	dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VSA_LINES, 20, vm->vsync_len);
+	dsi_reg_up(ctx, VID_MODE_CFG, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN |
+			LP_VFP_EN | LP_VBP_EN | LP_VSA_EN, LP_HBP_EN | LP_HFP_EN |
+			LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN);
+
+	hs_to = (hline * vm->vactive) + (2 * bpp_x100) / 100;
+	for (div = 0x80; (div < hs_to) && (div > 2); div--) {
+		if ((hs_to % div) == 0) {
+			writel(div, ctx->base + TIMEOUT_CNT_CLK_CONFIG);
+			writel(hs_to / div, ctx->base + LRX_H_TO_CONFIG);
+			writel(hs_to / div, ctx->base + HTX_TO_CONFIG);
+			break;
+		}
+	}
+
+	if (ctx->burst_mode == VIDEO_BURST_WITH_SYNC_PULSES) {
+		dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size);
+		writel(0, ctx->base + VIDEO_NULLPKT_SIZE);
+		dsi_reg_up(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 0);
+	} else {
+		/* non burst transmission */
+		null_pkt_size = 0;
+
+		/* bytes to be sent - first as one chunk */
+		bytes_per_chunk = vm->hactive * bpp_x100 / 100 + pkt_header;
+
+		/* hline total bytes from the DPI interface */
+		total_bytes = (vm->hactive + vm->hfront_porch) *
+			      ratio_x1000 / dsi->slave->lanes / 1000;
+
+		/* check if the pixels actually fit on the DSI link */
+		if (total_bytes < bytes_per_chunk) {
+			drm_err(dsi->drm, "current resolution cannot be set\n");
+			return -EINVAL;
+		}
+
+		chunk_overhead = total_bytes - bytes_per_chunk;
+
+		/* overhead higher than 1 -> enable multi packets */
+		if (chunk_overhead > 1) {
+			/* multi packets */
+			for (video_size = video_size_step;
+			     video_size < vm->hactive;
+			     video_size += video_size_step) {
+				if (vm->hactive * 1000 / video_size % 1000)
+					continue;
+
+				chunks = vm->hactive / video_size;
+				bytes_per_chunk = bpp_x100 * video_size / 100 +
+						  pkt_header;
+				if (total_bytes >= (bytes_per_chunk * chunks)) {
+					bytes_left = total_bytes -
+						     bytes_per_chunk * chunks;
+					break;
+				}
+			}
+
+			/* prevent overflow (unsigned - unsigned) */
+			if (bytes_left > (pkt_header * chunks)) {
+				null_pkt_size = (bytes_left -
+						pkt_header * chunks) / chunks;
+				/* avoid register overflow */
+				if (null_pkt_size > 1023)
+					null_pkt_size = 1023;
+			}
+
+		} else {
+			/* single packet */
+			chunks = 1;
+
+			/* round video_size up to a multiple of the video size step */
+			for (video_size = vm->hactive;
+			     (video_size % video_size_step) != 0;
+			     video_size++)
+				;
+		}
+
+		dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size);
+		writel(null_pkt_size, ctx->base + VIDEO_NULLPKT_SIZE);
+		dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks);
+	}
+
+	writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+	writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+	writel(1, ctx->base + SOFT_RESET);
+
+	return 0;
+}
+
+static void sprd_dsi_edpi_video(struct dsi_context *ctx)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	const u32 fifo_depth = 1096;
+	const u32 word_length = 4;
+	u32 hactive = ctx->vm.hactive;
+	u32 bpp_x100;
+	u32 max_fifo_len;
+	u8 coding;
+
+	coding = fmt_to_coding(dsi->slave->format);
+	bpp_x100 = calc_bytes_per_pixel_x100(coding);
+	max_fifo_len = word_length * fifo_depth * 100 / bpp_x100;
+
+	writel(0, ctx->base + SOFT_RESET);
+	dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding);
+	dsi_reg_wr(ctx, CMD_MODE_CFG, TEAR_FX_EN, 0, ctx->te_ack_en);
+
+	if (max_fifo_len > hactive)
+		writel(hactive, ctx->base + DCS_WM_PKT_SIZE);
+	else
+		writel(max_fifo_len, ctx->base + DCS_WM_PKT_SIZE);
+
+	writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT);
+	writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT);
+	writel(1, ctx->base + SOFT_RESET);
+}
+
+/*
+ * Send a packet on the generic interface. This function busy-waits for the
+ * transmit buffers to clear, and the wait is bounded by
+ * (param_length / 4) x DSIH_FIFO_ACTIVE_WAIT register accesses.
+ *
+ * Note: due to a controller restriction, this function cannot send Null and
+ * Blanking packets.
+ */
+static int sprd_dsi_wr_pkt(struct dsi_context *ctx, u8 vc, u8 type,
+			   const u8 *param, u16 len)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	u8 wc_lsbyte, wc_msbyte;
+	u32 payload;
+	int i, j, ret;
+
+	if (vc > 3)
+		return -EINVAL;
+
+	/* 1st: for a long packet, the payload must be configured first */
+	ret = dsi_wait_tx_payload_fifo_empty(ctx);
+	if (ret) {
+		drm_err(dsi->drm, "tx payload fifo is not empty\n");
+		return ret;
+	}
+
+	if (len > 2) {
+		for (i = 0, j = 0; i < len; i += j) {
+			payload = 0;
+			for (j = 0; (j < 4) && ((j + i) < (len)); j++)
+				payload |= param[i + j] << (j * 8);
+
+			writel(payload, ctx->base + GEN_PLD_DATA);
+		}
+		wc_lsbyte = len & 0xff;
+		wc_msbyte = len >> 8;
+	} else {
+		wc_lsbyte = (len > 0) ? param[0] : 0;
+		wc_msbyte = (len > 1) ? param[1] : 0;
+	}
+
+	/* 2nd: then set packet header */
+	ret = dsi_wait_tx_cmd_fifo_empty(ctx);
+	if (ret) {
+		drm_err(dsi->drm, "tx cmd fifo is not empty\n");
+		return ret;
+	}
+
+	writel(type | (vc << 6) | (wc_lsbyte << 8) | (wc_msbyte << 16),
+	       ctx->base + GEN_HDR);
+
+	return 0;
+}
+
+/*
+ * Send a READ packet to the peripheral using the generic interface; this
+ * forces command mode and stops video mode (because of the BTA).
+ *
+ * This function has an active delay to wait for the buffer to clear,
+ * limited to 2 x DSIH_FIFO_ACTIVE_WAIT (waiting for the command buffer,
+ * then for the response).
+ * @note this function will enable BTA
+ */
+static int sprd_dsi_rd_pkt(struct dsi_context *ctx, u8 vc, u8 type,
+			   u8 msb_byte, u8 lsb_byte,
+			   u8 *buffer, u8 bytes_to_read)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	int i, ret;
+	int count = 0;
+	u32 temp;
+
+	if (vc > 3)
+		return -EINVAL;
+
+	/* 1st: send read command to peripheral */
+	ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5);
+	if (!ret)
+		return -EIO;
+
+	writel(type | (vc << 6) | (lsb_byte << 8) | (msb_byte << 16),
+	       ctx->base + GEN_HDR);
+
+	/* 2nd: wait until the peripheral response completes */
+	ret = dsi_wait_rd_resp_completed(ctx);
+	if (ret) {
+		drm_err(dsi->drm, "read response timed out\n");
+		return ret;
+	}
+
+	/* 3rd: get data from rx payload fifo */
+	ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1);
+	if (ret) {
+		drm_err(dsi->drm, "rx payload fifo empty\n");
+		return -EIO;
+	}
+
+	for (i = 0; i < 100; i++) {
+		temp = readl(ctx->base + GEN_PLD_DATA);
+
+		if (count < bytes_to_read)
+			buffer[count++] = temp & 0xff;
+		if (count < bytes_to_read)
+			buffer[count++] = (temp >> 8) & 0xff;
+		if (count < bytes_to_read)
+			buffer[count++] = (temp >> 16) & 0xff;
+		if (count < bytes_to_read)
+			buffer[count++] = (temp >> 24) & 0xff;
+
+		ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1);
+		if (ret)
+			return count;
+	}
+
+	return 0;
+}
+
+static void sprd_dsi_set_work_mode(struct dsi_context *ctx, u8 mode)
+{
+	if (mode == DSI_MODE_CMD)
+		writel(1, ctx->base + DSI_MODE_CFG);
+	else
+		writel(0, ctx->base + DSI_MODE_CFG);
+}
+
+static void sprd_dsi_state_reset(struct dsi_context *ctx)
+{
+	writel(0, ctx->base + SOFT_RESET);
+	udelay(100);
+	writel(1, ctx->base + SOFT_RESET);
+}
+
+static int sprd_dphy_init(struct dsi_context *ctx)
+{
+	struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx);
+	int ret;
+
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0);
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0);
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, 0);
+
+	dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0);
+	dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, PHY_TESTCLR);
+	dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0);
+
+	dphy_pll_config(ctx);
+	dphy_timing_config(ctx);
+
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, RF_PHY_SHUTDOWN);
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N);
+	writel(0x1C, ctx->base + PHY_MIN_STOP_TIME);
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN);
+	writel(dsi->slave->lanes - 1, ctx->base + PHY_LANE_NUM_CONFIG);
+
+	ret = dphy_wait_pll_locked(ctx);
+	if (ret) {
+		drm_err(dsi->drm, "dphy initialization failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void sprd_dphy_fini(struct dsi_context *ctx)
+{
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0);
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0);
+	dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N);
+}
+
+static void sprd_dsi_encoder_mode_set(struct drm_encoder *encoder,
+				      struct drm_display_mode *mode,
+				      struct drm_display_mode *adj_mode)
+{
+	struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+
+	drm_display_mode_to_videomode(adj_mode, &dsi->ctx.vm);
+}
+
+static void sprd_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+	struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+	struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc);
+	struct dsi_context *ctx = &dsi->ctx;
+
+	if (ctx->enabled) {
+		drm_warn(dsi->drm, "dsi is already initialized\n");
+		return;
+	}
+
+	sprd_dsi_init(ctx);
+	if (ctx->work_mode == DSI_MODE_VIDEO)
+		sprd_dsi_dpi_video(ctx);
+	else
+		sprd_dsi_edpi_video(ctx);
+
+	sprd_dphy_init(ctx);
+
+	sprd_dsi_set_work_mode(ctx, ctx->work_mode);
+	sprd_dsi_state_reset(ctx);
+
+	if (dsi->slave->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+		dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, AUTO_CLKLANE_CTRL_EN,
+			   AUTO_CLKLANE_CTRL_EN);
+	} else {
+		dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN);
+		dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, PHY_CLKLANE_TX_REQ_HS,
+			   PHY_CLKLANE_TX_REQ_HS);
+		dphy_wait_pll_locked(ctx);
+	}
+
+	sprd_dpu_run(dpu);
+
+	ctx->enabled = true;
+}
+
+static void sprd_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+	struct sprd_dsi *dsi = encoder_to_dsi(encoder);
+	struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc);
+	struct dsi_context *ctx = &dsi->ctx;
+
+	if (!ctx->enabled) {
+		drm_warn(dsi->drm, "dsi isn't initialized\n");
+		return;
+	}
+
+	sprd_dpu_stop(dpu);
+	sprd_dphy_fini(ctx);
+	sprd_dsi_fini(ctx);
+
+	ctx->enabled = false;
+}
+
+static const struct drm_encoder_helper_funcs sprd_encoder_helper_funcs = {
+	.mode_set = sprd_dsi_encoder_mode_set,
+	.enable = sprd_dsi_encoder_enable,
+	.disable = sprd_dsi_encoder_disable
+};
+
+static const struct drm_encoder_funcs sprd_encoder_funcs = {
+	.destroy = drm_encoder_cleanup,
+};
+
+static int sprd_dsi_encoder_init(struct sprd_dsi *dsi,
+				 struct device *dev)
+{
+	struct drm_encoder *encoder = &dsi->encoder;
+	u32 crtc_mask;
+	int ret;
+
+	crtc_mask = drm_of_find_possible_crtcs(dsi->drm, dev->of_node);
+	if (!crtc_mask) {
+		drm_err(dsi->drm, "failed to find crtc mask\n");
+		return -EINVAL;
+	}
+
+	drm_dbg(dsi->drm, "found possible crtcs: 0x%08x\n", crtc_mask);
+
+	encoder->possible_crtcs = crtc_mask;
+	ret = drm_encoder_init(dsi->drm, encoder, &sprd_encoder_funcs,
+			       DRM_MODE_ENCODER_DSI, NULL);
+	if (ret) {
+		drm_err(dsi->drm, "failed to init dsi encoder\n");
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &sprd_encoder_helper_funcs);
+
+	return 0;
+}
+
+static int sprd_dsi_bridge_init(struct sprd_dsi *dsi,
+				struct device *dev)
+{
+	int ret;
+
+	dsi->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
+	if (IS_ERR(dsi->panel_bridge))
+		return PTR_ERR(dsi->panel_bridge);
+
+	ret = drm_bridge_attach(&dsi->encoder, dsi->panel_bridge, NULL, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int sprd_dsi_context_init(struct sprd_dsi *dsi,
+				 struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct dsi_context *ctx = &dsi->ctx;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctx->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!ctx->base) {
+		drm_err(dsi->drm, "failed to map dsi host registers\n");
+		return -ENXIO;
+	}
+
+	ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config);
+	if (IS_ERR(ctx->regmap)) {
+		drm_err(dsi->drm, "dphy regmap init failed\n");
+		return 
PTR_ERR(ctx->regmap); + } + + ctx->data_hs2lp = 120; + ctx->data_lp2hs = 500; + ctx->clk_hs2lp = 4; + ctx->clk_lp2hs = 15; + ctx->max_rd_time = 6000; + ctx->int0_mask = 0xffffffff; + ctx->int1_mask = 0xffffffff; + ctx->enabled = true; + + return 0; +} + +static int sprd_dsi_bind(struct device *dev, struct device *master, void *data) +{ + struct drm_device *drm = data; + struct sprd_dsi *dsi = dev_get_drvdata(dev); + int ret; + + dsi->drm = drm; + + ret = sprd_dsi_encoder_init(dsi, dev); + if (ret) + return ret; + + ret = sprd_dsi_bridge_init(dsi, dev); + if (ret) + return ret; + + ret = sprd_dsi_context_init(dsi, dev); + if (ret) + return ret; + + return 0; +} + +static void sprd_dsi_unbind(struct device *dev, + struct device *master, void *data) +{ + struct sprd_dsi *dsi = dev_get_drvdata(dev); + + drm_of_panel_bridge_remove(dev->of_node, 1, 0); + + drm_encoder_cleanup(&dsi->encoder); +} + +static const struct component_ops dsi_component_ops = { + .bind = sprd_dsi_bind, + .unbind = sprd_dsi_unbind, +}; + +static int sprd_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *slave) +{ + struct sprd_dsi *dsi = host_to_dsi(host); + struct dsi_context *ctx = &dsi->ctx; + + dsi->slave = slave; + + if (slave->mode_flags & MIPI_DSI_MODE_VIDEO) + ctx->work_mode = DSI_MODE_VIDEO; + else + ctx->work_mode = DSI_MODE_CMD; + + if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + ctx->burst_mode = VIDEO_BURST_WITH_SYNC_PULSES; + else if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_PULSES; + else + ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_EVENTS; + + return component_add(host->dev, &dsi_component_ops); +} + +static int sprd_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *slave) +{ + component_del(host->dev, &dsi_component_ops); + + return 0; +} + +static ssize_t sprd_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct sprd_dsi *dsi = host_to_dsi(host); + const u8 *tx_buf = msg->tx_buf; + + if (msg->rx_buf && msg->rx_len) { + u8 lsb = (msg->tx_len > 0) ? tx_buf[0] : 0; + u8 msb = (msg->tx_len > 1) ? 
tx_buf[1] : 0; + + return sprd_dsi_rd_pkt(&dsi->ctx, msg->channel, msg->type, + msb, lsb, msg->rx_buf, msg->rx_len); + } + + if (msg->tx_buf && msg->tx_len) + return sprd_dsi_wr_pkt(&dsi->ctx, msg->channel, msg->type, + tx_buf, msg->tx_len); + + return 0; +} + +static const struct mipi_dsi_host_ops sprd_dsi_host_ops = { + .attach = sprd_dsi_host_attach, + .detach = sprd_dsi_host_detach, + .transfer = sprd_dsi_host_transfer, +}; + +static const struct of_device_id dsi_match_table[] = { + { .compatible = "sprd,sharkl3-dsi-host" }, + { /* sentinel */ }, +}; + +static int sprd_dsi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sprd_dsi *dsi; + + dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); + if (!dsi) + return -ENOMEM; + + dev_set_drvdata(dev, dsi); + + dsi->host.ops = &sprd_dsi_host_ops; + dsi->host.dev = dev; + + return mipi_dsi_host_register(&dsi->host); +} + +static int sprd_dsi_remove(struct platform_device *pdev) +{ + struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev); + + mipi_dsi_host_unregister(&dsi->host); + + return 0; +} + +struct platform_driver sprd_dsi_driver = { + .probe = sprd_dsi_probe, + .remove = sprd_dsi_remove, + .driver = { + .name = "sprd-dsi-drv", + .of_match_table = dsi_match_table, + }, +}; + +MODULE_AUTHOR("Leon He <leon.he@unisoc.com>"); +MODULE_AUTHOR("Kevin Tang <kevin.tang@unisoc.com>"); +MODULE_DESCRIPTION("Unisoc MIPI DSI HOST Controller Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/sprd/sprd_dsi.h b/drivers/gpu/drm/sprd/sprd_dsi.h new file mode 100644 index 000000000000..d858ebb11115 --- /dev/null +++ b/drivers/gpu/drm/sprd/sprd_dsi.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Unisoc Inc. + */ + +#ifndef __SPRD_DSI_H__ +#define __SPRD_DSI_H__ + +#include <linux/of.h> +#include <linux/device.h> +#include <linux/regmap.h> +#include <video/videomode.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_connector.h> +#include <drm/drm_encoder.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_print.h> +#include <drm/drm_panel.h> + +#define encoder_to_dsi(encoder) \ + container_of(encoder, struct sprd_dsi, encoder) + +enum dsi_work_mode { + DSI_MODE_CMD = 0, + DSI_MODE_VIDEO +}; + +enum video_burst_mode { + VIDEO_NON_BURST_WITH_SYNC_PULSES = 0, + VIDEO_NON_BURST_WITH_SYNC_EVENTS, + VIDEO_BURST_WITH_SYNC_PULSES +}; + +enum dsi_color_coding { + COLOR_CODE_16BIT_CONFIG1 = 0, + COLOR_CODE_16BIT_CONFIG2, + COLOR_CODE_16BIT_CONFIG3, + COLOR_CODE_18BIT_CONFIG1, + COLOR_CODE_18BIT_CONFIG2, + COLOR_CODE_24BIT, + COLOR_CODE_20BIT_YCC422_LOOSELY, + COLOR_CODE_24BIT_YCC422, + COLOR_CODE_16BIT_YCC422, + COLOR_CODE_30BIT, + COLOR_CODE_36BIT, + COLOR_CODE_12BIT_YCC420, + COLOR_CODE_COMPRESSTION, + COLOR_CODE_MAX +}; + +enum pll_timing { + NONE, + REQUEST_TIME, + PREPARE_TIME, + SETTLE_TIME, + ZERO_TIME, + TRAIL_TIME, + EXIT_TIME, + CLKPOST_TIME, + TA_GET, + TA_GO, + TA_SURE, + TA_WAIT, +}; + +struct dphy_pll { + u8 refin; /* Pre-divider control signal */ + u8 cp_s; /* 00: SDM_EN=1, 10: SDM_EN=0 */ + u8 fdk_s; /* PLL mode control: integer or fraction */ + u8 sdm_en; + u8 div; + u8 int_n; /* integer N PLL */ + u32 ref_clk; /* dphy reference clock, unit: MHz */ + u32 freq; /* panel config, unit: KHz */ + u32 fvco; + u32 potential_fvco; + u32 nint; /* sigma delta modulator NINT control */ + u32 kint; /* sigma delta modulator KINT control */ + u8 lpf_sel; /* low pass filter control */ + u8 out_sel; /* post divider control */ + u8 vco_band; /* vco range */ + u8 det_delay; +}; + 
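+/*
+ * dphy_pll_config() derives these fields from the requested lane rate:
+ * the reference clock is pre-divided (refin), multiplied up by roughly
+ * nint plus the fraction encoded in kint (sigma-delta fractional-N),
+ * and the post dividers (out_sel, div) scale the VCO output down to the
+ * target frequency.
+ */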
+struct dsi_context { + void __iomem *base; + struct regmap *regmap; + struct dphy_pll pll; + struct videomode vm; + bool enabled; + + u8 work_mode; + u8 burst_mode; + u32 int0_mask; + u32 int1_mask; + + /* maximum time (ns) for data lanes from HS to LP */ + u16 data_hs2lp; + /* maximum time (ns) for data lanes from LP to HS */ + u16 data_lp2hs; + /* maximum time (ns) for clk lanes from HS to LP */ + u16 clk_hs2lp; + /* maximum time (ns) for clk lanes from LP to HS */ + u16 clk_lp2hs; + /* maximum time (ns) for BTA operation - REQUIRED */ + u16 max_rd_time; + /* enable receiving frame ack packets - for video mode */ + bool frame_ack_en; + /* enable receiving tear effect ack packets - for cmd mode */ + bool te_ack_en; +}; + +struct sprd_dsi { + struct drm_device *drm; + struct mipi_dsi_host host; + struct mipi_dsi_device *slave; + struct drm_encoder encoder; + struct drm_bridge *panel_bridge; + struct dsi_context ctx; +}; + +int dphy_pll_config(struct dsi_context *ctx); +void dphy_timing_config(struct dsi_context *ctx); + +#endif /* __SPRD_DSI_H__ */ diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index d0cfdd36b38f..246a94afbe74 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -5,7 +5,6 @@ config DRM_STI select RESET_CONTROLLER select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL select FW_LOADER select SND_SOC_HDMI_CODEC if SND_SOC diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index d09b08995b12..3c61ba8b43e0 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -927,12 +927,12 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp) header = (struct fw_header *)firmware->data; if (firmware->size < sizeof(*header)) { - DRM_ERROR("Invalid firmware size (%d)\n", firmware->size); + DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size); goto out; } if ((sizeof(*header) + header->rd_size + header->wr_size + header->pmem_size + header->dmem_size) != firmware->size) { - DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n", + DRM_ERROR("Invalid fmw structure (%zu+%d+%d+%d+%d != %zu)\n", sizeof(*header), header->rd_size, header->wr_size, header->pmem_size, header->dmem_size, firmware->size); diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index b7d66915a2be..e0379488cd0d 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig @@ -4,7 +4,6 @@ config DRM_STM depends on DRM && (ARCH_STM32 || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 5755f0432e77..befc5a80222d 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -5,7 +5,6 @@ config DRM_SUN4I depends on ARCH_SUNXI || COMPILE_TEST select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS @@ -46,6 +45,7 @@ config DRM_SUN6I_DSI default MACH_SUN8I select CRC_CCITT select DRM_MIPI_DSI + select RESET_CONTROLLER select PHY_SUN6I_MIPI_DPHY help Choose this option if you want have an Allwinner SoC with diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 54dd562e294c..b630614b3d72 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -53,7 +53,7 @@ 
static const struct drm_driver sun4i_drv_driver = { .minor = 0, /* GEM Operations */ - DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create), + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create), }; static int sun4i_drv_bind(struct device *dev) diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 21d473deb757..a8d75fd7e9f4 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -213,11 +213,13 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, goto err_disable_clk_tmds; } + ret = sun8i_hdmi_phy_init(hdmi->phy); + if (ret) + goto err_disable_clk_tmds; + drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); - sun8i_hdmi_phy_init(hdmi->phy); - plat_data->mode_valid = hdmi->quirks->mode_valid; plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe; sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data); @@ -259,6 +261,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master, struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev); dw_hdmi_unbind(hdmi->hdmi); + sun8i_hdmi_phy_deinit(hdmi->phy); clk_disable_unprepare(hdmi->clk_tmds); reset_control_assert(hdmi->rst_ctrl); gpiod_set_value(hdmi->ddc_en, 0); diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index 74f6ed0e2570..bffe1b9cd3dc 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -169,6 +169,7 @@ struct sun8i_hdmi_phy { struct clk *clk_phy; struct clk *clk_pll0; struct clk *clk_pll1; + struct device *dev; unsigned int rcal; struct regmap *regs; struct reset_control *rst_phy; @@ -205,7 +206,8 @@ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder) int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node); -void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy); +int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy); +void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy); void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy, struct dw_hdmi_plat_data *plat_data); diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index c9239708d398..b64d93da651d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -506,9 +506,60 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy) phy->rcal = (val & SUN8I_HDMI_PHY_ANA_STS_RCAL_MASK) >> 2; } -void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy) +int sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy) { + int ret; + + ret = reset_control_deassert(phy->rst_phy); + if (ret) { + dev_err(phy->dev, "Cannot deassert phy reset control: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(phy->clk_bus); + if (ret) { + dev_err(phy->dev, "Cannot enable bus clock: %d\n", ret); + goto err_assert_rst_phy; + } + + ret = clk_prepare_enable(phy->clk_mod); + if (ret) { + dev_err(phy->dev, "Cannot enable mod clock: %d\n", ret); + goto err_disable_clk_bus; + } + + if (phy->variant->has_phy_clk) { + ret = sun8i_phy_clk_create(phy, phy->dev, + phy->variant->has_second_pll); + if (ret) { + dev_err(phy->dev, "Couldn't create the PHY clock\n"); + goto err_disable_clk_mod; + } + + clk_prepare_enable(phy->clk_phy); + } + phy->variant->phy_init(phy); + + return 0; + +err_disable_clk_mod: + clk_disable_unprepare(phy->clk_mod); +err_disable_clk_bus: + clk_disable_unprepare(phy->clk_bus); 
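+	/* finally undo the reset deassert that started the init sequence */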
+err_assert_rst_phy: + reset_control_assert(phy->rst_phy); + + return ret; +} + +void sun8i_hdmi_phy_deinit(struct sun8i_hdmi_phy *phy) +{ + clk_disable_unprepare(phy->clk_mod); + clk_disable_unprepare(phy->clk_bus); + clk_disable_unprepare(phy->clk_phy); + + reset_control_assert(phy->rst_phy); } void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy, @@ -638,6 +689,7 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev) return -ENOMEM; phy->variant = (struct sun8i_hdmi_phy_variant *)match->data; + phy->dev = dev; ret = of_address_to_resource(node, 0, &res); if (ret) { @@ -696,47 +748,10 @@ static int sun8i_hdmi_phy_probe(struct platform_device *pdev) goto err_put_clk_pll1; } - ret = reset_control_deassert(phy->rst_phy); - if (ret) { - dev_err(dev, "Cannot deassert phy reset control: %d\n", ret); - goto err_put_rst_phy; - } - - ret = clk_prepare_enable(phy->clk_bus); - if (ret) { - dev_err(dev, "Cannot enable bus clock: %d\n", ret); - goto err_deassert_rst_phy; - } - - ret = clk_prepare_enable(phy->clk_mod); - if (ret) { - dev_err(dev, "Cannot enable mod clock: %d\n", ret); - goto err_disable_clk_bus; - } - - if (phy->variant->has_phy_clk) { - ret = sun8i_phy_clk_create(phy, dev, - phy->variant->has_second_pll); - if (ret) { - dev_err(dev, "Couldn't create the PHY clock\n"); - goto err_disable_clk_mod; - } - - clk_prepare_enable(phy->clk_phy); - } - platform_set_drvdata(pdev, phy); return 0; -err_disable_clk_mod: - clk_disable_unprepare(phy->clk_mod); -err_disable_clk_bus: - clk_disable_unprepare(phy->clk_bus); -err_deassert_rst_phy: - reset_control_assert(phy->rst_phy); -err_put_rst_phy: - reset_control_put(phy->rst_phy); err_put_clk_pll1: clk_put(phy->clk_pll1); err_put_clk_pll0: @@ -753,12 +768,6 @@ static int sun8i_hdmi_phy_remove(struct platform_device *pdev) { struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev); - clk_disable_unprepare(phy->clk_mod); - clk_disable_unprepare(phy->clk_bus); - clk_disable_unprepare(phy->clk_phy); - - reset_control_assert(phy->rst_phy); - reset_control_put(phy->rst_phy); clk_put(phy->clk_pll0); diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 16c7aabb94d3..a29d64f87563 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc, bool prepare_bandwidth_transition) { const struct tegra_plane_state *old_tegra_state, *new_tegra_state; - const struct tegra_dc_state *old_dc_state, *new_dc_state; u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw; const struct drm_plane_state *old_plane_state; const struct drm_crtc_state *old_crtc_state; @@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc, return; old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); - old_dc_state = to_const_dc_state(old_crtc_state); - new_dc_state = to_const_dc_state(crtc->state); if (!crtc->state->active) { if (!old_crtc_state->active) diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index f0cb691852a1..40378308d527 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state) return NULL; } -static inline const struct tegra_dc_state * -to_const_dc_state(const struct drm_crtc_state *state) -{ - return to_dc_state((struct drm_crtc_state *)state); -} - struct tegra_dc_stats { unsigned long frames; unsigned long vblank; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 
6ec598f5d5b3..d38fd7e12b57 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -12,6 +12,7 @@ #include <linux/dma-buf.h> #include <linux/iommu.h> +#include <linux/module.h> #include <drm/drm_drv.h> #include <drm/drm_prime.h> @@ -20,6 +21,8 @@ #include "drm.h" #include "gem.h" +MODULE_IMPORT_NS(DMA_BUF); + static void tegra_bo_put(struct host1x_bo *bo) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c index dc16a24f4dbe..690a339c52ec 100644 --- a/drivers/gpu/drm/tegra/uapi.c +++ b/drivers/gpu/drm/tegra/uapi.c @@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f mapping->iova = sg_dma_address(mapping->sgt->sgl); } - mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size; + mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size; err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX), GFP_KERNEL); diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig index f790a5215302..bc4fa59b6fa9 100644 --- a/drivers/gpu/drm/tidss/Kconfig +++ b/drivers/gpu/drm/tidss/Kconfig @@ -3,7 +3,6 @@ config DRM_TIDSS depends on DRM && OF depends on ARM || ARM64 || COMPILE_TEST select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER help The TI Keystone family SoCs introduced a new generation of diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index d620f35688da..7c784e90e40e 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -88,16 +88,11 @@ static int __maybe_unused tidss_resume(struct device *dev) return drm_mode_config_helper_resume(&tidss->ddev); } -#ifdef CONFIG_PM - -static const struct dev_pm_ops tidss_pm_ops = { - .runtime_suspend = tidss_pm_runtime_suspend, - .runtime_resume = tidss_pm_runtime_resume, +static __maybe_unused const struct dev_pm_ops tidss_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume) + SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL) }; -#endif /* CONFIG_PM */ - /* DRM device Information */ static void tidss_release(struct drm_device *ddev) @@ -250,9 +245,7 @@ static struct platform_driver tidss_platform_driver = { .shutdown = tidss_shutdown, .driver = { .name = "tidss", -#ifdef CONFIG_PM - .pm = &tidss_pm_ops, -#endif + .pm = pm_ptr(&tidss_pm_ops), .of_match_table = tidss_of_table, .suppress_bind_attrs = true, }, diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 9f505a149990..e315591eb36b 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -3,7 +3,6 @@ config DRM_TILCDC tristate "DRM Support for TI LCDC Display Controller" depends on DRM && OF && ARM select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_BRIDGE select DRM_PANEL_BRIDGE diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 6b03f89a98d4..3ddb7c710a3d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -186,10 +186,8 @@ static void tilcdc_fini(struct drm_device *dev) if (priv->mmio) iounmap(priv->mmio); - if (priv->wq) { - flush_workqueue(priv->wq); + if (priv->wq) destroy_workqueue(priv->wq); - } dev->dev_private = NULL; diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 1ceb93fbdc50..712e0004e96e 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ 
b/drivers/gpu/drm/tiny/Kconfig @@ -3,7 +3,7 @@ config DRM_ARCPGU tristate "ARC PGU" depends on DRM && OF - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_KMS_HELPER help Choose this option if you have an ARC PGU controller. @@ -71,7 +71,7 @@ config TINYDRM_HX8357D tristate "DRM support for HX8357D display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -80,11 +80,24 @@ config TINYDRM_HX8357D If M is selected the module will be called hx8357d. +config TINYDRM_ILI9163 + tristate "DRM support for ILI9163 display panels" + depends on DRM && SPI + select BACKLIGHT_CLASS_DEVICE + select DRM_GEM_CMA_HELPER + select DRM_KMS_HELPER + select DRM_MIPI_DBI + help + DRM driver for the following Ilitek ILI9163 panels: + * NHD-1.8-128160EF 128x160 TFT + + If M is selected the module will be called ili9163. + config TINYDRM_ILI9225 tristate "DRM support for ILI9225 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI help DRM driver for the following Ilitek ILI9225 panels: @@ -96,7 +109,7 @@ config TINYDRM_ILI9341 tristate "DRM support for ILI9341 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -109,7 +122,7 @@ config TINYDRM_ILI9486 tristate "DRM support for ILI9486 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -123,7 +136,7 @@ config TINYDRM_MI0283QT tristate "DRM support for MI0283QT" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help @@ -134,7 +147,7 @@ config TINYDRM_REPAPER tristate "DRM support for Pervasive Displays RePaper panels (V231)" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER help DRM driver for the following Pervasive Displays panels: 1.44" TFT EPD Panel (E1144CS021) @@ -148,7 +161,7 @@ config TINYDRM_ST7586 tristate "DRM support for Sitronix ST7586 display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI help DRM driver for the following Sitronix ST7586 panels: @@ -160,7 +173,7 @@ config TINYDRM_ST7735R tristate "DRM support for Sitronix ST7715R/ST7735R display panels" depends on DRM && SPI select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER select DRM_MIPI_DBI select BACKLIGHT_CLASS_DEVICE help diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index e09942895c77..5d5505d40e7b 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o obj-$(CONFIG_DRM_GM12U320) += gm12u320.o obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o +obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 2ce3bd903b70..fc26a1ce11ee 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: 
GPL-2.0-or-later -#include <linux/console.h> #include <linux/pci.h> #include <drm/drm_aperture.h> @@ -719,7 +718,7 @@ static struct pci_driver bochs_pci_driver = { static int __init bochs_init(void) { - if (vgacon_text_force() && bochs_modeset == -1) + if (drm_firmware_drivers_only() && bochs_modeset == -1) return -EINVAL; if (bochs_modeset == 0) diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 4611ec408506..c95d9ff7d600 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -16,7 +16,6 @@ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com> */ -#include <linux/console.h> #include <linux/dma-buf-map.h> #include <linux/module.h> #include <linux/pci.h> @@ -317,28 +316,28 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_ struct drm_rect *rect) { struct cirrus_device *cirrus = to_cirrus(fb->dev); + void __iomem *dst = cirrus->vram; void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */ int idx; if (!drm_dev_enter(&cirrus->dev, &idx)) return -ENODEV; - if (cirrus->cpp == fb->format->cpp[0]) - drm_fb_memcpy_dstclip(cirrus->vram, fb->pitches[0], - vmap, fb, rect); + if (cirrus->cpp == fb->format->cpp[0]) { + dst += drm_fb_clip_offset(fb->pitches[0], fb->format, rect); + drm_fb_memcpy_toio(dst, fb->pitches[0], vmap, fb, rect); - else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) - drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram, - cirrus->pitch, - vmap, fb, rect, false); + } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2) { + dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect); + drm_fb_xrgb8888_to_rgb565_toio(dst, cirrus->pitch, vmap, fb, rect, false); - else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) - drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram, - cirrus->pitch, - vmap, fb, rect); + } else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3) { + dst += drm_fb_clip_offset(cirrus->pitch, fb->format, rect); + drm_fb_xrgb8888_to_rgb888_toio(dst, cirrus->pitch, vmap, fb, rect); - else + } else { WARN_ON_ONCE("cpp mismatch"); + } drm_dev_exit(idx); @@ -636,8 +635,9 @@ static struct pci_driver cirrus_pci_driver = { static int __init cirrus_init(void) { - if (vgacon_text_force()) + if (drm_firmware_drivers_only()) return -EINVAL; + return pci_register_driver(&cirrus_pci_driver); } diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c new file mode 100644 index 000000000000..bcc181351236 --- /dev/null +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/spi/spi.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_cma_helper.h> +#include <drm/drm_mipi_dbi.h> +#include <drm/drm_modeset_helper.h> + +#include <video/mipi_display.h> + +#define ILI9163_FRMCTR1 0xb1 + +#define ILI9163_PWCTRL1 0xc0 +#define ILI9163_PWCTRL2 0xc1 +#define ILI9163_VMCTRL1 0xc5 +#define ILI9163_VMCTRL2 0xc7 +#define ILI9163_PWCTRLA 0xcb +#define ILI9163_PWCTRLB 0xcf + +#define ILI9163_EN3GAM 0xf2 + +#define ILI9163_MADCTL_BGR BIT(3) +#define ILI9163_MADCTL_MV BIT(5) +#define ILI9163_MADCTL_MX BIT(6) +#define ILI9163_MADCTL_MY BIT(7) + +static void yx240qv29_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state, + struct drm_plane_state 
*plane_state) +{ + struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev); + struct mipi_dbi *dbi = &dbidev->dbi; + u8 addr_mode; + int ret, idx; + + if (!drm_dev_enter(pipe->crtc.dev, &idx)) + return; + + DRM_DEBUG_KMS("\n"); + + ret = mipi_dbi_poweron_conditional_reset(dbidev); + if (ret < 0) + goto out_exit; + if (ret == 1) + goto out_enable; + + /* Gamma */ + mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x04); + mipi_dbi_command(dbi, ILI9163_EN3GAM, 0x00); + + /* Frame Rate */ + mipi_dbi_command(dbi, ILI9163_FRMCTR1, 0x0a, 0x14); + + /* Power Control */ + mipi_dbi_command(dbi, ILI9163_PWCTRL1, 0x0a, 0x00); + mipi_dbi_command(dbi, ILI9163_PWCTRL2, 0x02); + + /* VCOM */ + mipi_dbi_command(dbi, ILI9163_VMCTRL1, 0x2f, 0x3e); + mipi_dbi_command(dbi, ILI9163_VMCTRL2, 0x40); + + /* Memory Access Control */ + mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT); + + mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(100); + + mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); + msleep(100); + +out_enable: + switch (dbidev->rotation) { + default: + addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MY; + break; + case 90: + addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MV; + break; + case 180: + addr_mode = 0; + break; + case 270: + addr_mode = ILI9163_MADCTL_MY | ILI9163_MADCTL_MV; + break; + } + addr_mode |= ILI9163_MADCTL_BGR; + mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); + mipi_dbi_enable_flush(dbidev, crtc_state, plane_state); +out_exit: + drm_dev_exit(idx); +} + +static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = { + .enable = yx240qv29_enable, + .disable = mipi_dbi_pipe_disable, + .update = mipi_dbi_pipe_update, + .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, +}; + +static const struct drm_display_mode yx240qv29_mode = { + DRM_SIMPLE_MODE(128, 160, 28, 35), +}; + +DEFINE_DRM_GEM_CMA_FOPS(ili9163_fops); + +static struct drm_driver ili9163_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &ili9163_fops, + DRM_GEM_CMA_DRIVER_OPS_VMAP, + .debugfs_init = mipi_dbi_debugfs_init, + .name = "ili9163", + .desc = "Ilitek ILI9163", + .date = "20210208", + .major = 1, + .minor = 0, +}; + +static const struct of_device_id ili9163_of_match[] = { + { .compatible = "newhaven,1.8-128160EF" }, + { } +}; +MODULE_DEVICE_TABLE(of, ili9163_of_match); + +static const struct spi_device_id ili9163_id[] = { + { "nhd-1.8-128160EF", 0 }, + { } +}; +MODULE_DEVICE_TABLE(spi, ili9163_id); + +static int ili9163_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct mipi_dbi_dev *dbidev; + struct drm_device *drm; + struct mipi_dbi *dbi; + struct gpio_desc *dc; + u32 rotation = 0; + int ret; + + dbidev = devm_drm_dev_alloc(dev, &ili9163_driver, + struct mipi_dbi_dev, drm); + if (IS_ERR(dbidev)) + return PTR_ERR(dbidev); + + dbi = &dbidev->dbi; + drm = &dbidev->drm; + + spi_set_drvdata(spi, drm); + + dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(dbi->reset)) { + DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n"); + return PTR_ERR(dbi->reset); + } + + dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW); + if (IS_ERR(dc)) { + DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n"); + return PTR_ERR(dc); + } + + dbidev->backlight = devm_of_find_backlight(dev); + if (IS_ERR(dbidev->backlight)) + return PTR_ERR(dbidev->backlight); + + device_property_read_u32(dev, "rotation", &rotation); + + ret = mipi_dbi_spi_init(spi, dbi, dc); + if (ret) + return ret; + + 
ret = mipi_dbi_dev_init(dbidev, &ili9163_pipe_funcs, &yx240qv29_mode, rotation); + if (ret) + return ret; + + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + drm_fbdev_generic_setup(drm, 0); + + return 0; +} + +static int ili9163_remove(struct spi_device *spi) +{ + struct drm_device *drm = spi_get_drvdata(spi); + + drm_dev_unplug(drm); + drm_atomic_helper_shutdown(drm); + + return 0; +} + +static void ili9163_shutdown(struct spi_device *spi) +{ + drm_atomic_helper_shutdown(spi_get_drvdata(spi)); +} + +static struct spi_driver ili9163_spi_driver = { + .driver = { + .name = "ili9163", + .of_match_table = ili9163_of_match, + }, + .id_table = ili9163_id, + .probe = ili9163_probe, + .remove = ili9163_remove, + .shutdown = ili9163_shutdown, +}; +module_spi_driver(ili9163_spi_driver); + +MODULE_DESCRIPTION("Ilitek ILI9163 DRM driver"); +MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 4d07b21a16e6..97a775c48cea 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -560,7 +560,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb) if (ret) goto out_free; - drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip); + drm_fb_xrgb8888_to_gray8(buf, 0, cma_obj->vaddr, fb, &clip); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index 481b48bde047..b977f5c94562 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -2,6 +2,7 @@ #include <linux/clk.h> #include <linux/of_clk.h> +#include <linux/minmax.h> #include <linux/platform_data/simplefb.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> @@ -570,8 +571,8 @@ static const uint32_t simpledrm_default_formats[] = { //DRM_FORMAT_XRGB1555, //DRM_FORMAT_ARGB1555, DRM_FORMAT_RGB888, - //DRM_FORMAT_XRGB2101010, - //DRM_FORMAT_ARGB2101010, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_ARGB2101010, }; static const uint64_t simpledrm_format_modifiers[] = { @@ -641,16 +642,25 @@ simpledrm_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_framebuffer *fb = plane_state->fb; void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */ struct drm_device *dev = &sdev->dev; + void __iomem *dst = sdev->screen_base; + struct drm_rect src_clip, dst_clip; int idx; if (!fb) return; + drm_rect_fp_to_int(&src_clip, &plane_state->src); + + dst_clip = plane_state->dst; + if (!drm_rect_intersect(&dst_clip, &src_clip)) + return; + if (!drm_dev_enter(dev, &idx)) return; - drm_fb_blit_dstclip(sdev->screen_base, sdev->pitch, - sdev->format->format, vmap, fb); + dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip); + drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip); + drm_dev_exit(idx); } @@ -680,20 +690,25 @@ simpledrm_simple_display_pipe_update(struct drm_simple_display_pipe *pipe, void *vmap = shadow_plane_state->data[0].vaddr; /* TODO: Use mapping abstraction */ struct drm_framebuffer *fb = plane_state->fb; struct drm_device *dev = &sdev->dev; - struct drm_rect clip; + void __iomem *dst = sdev->screen_base; + struct drm_rect src_clip, dst_clip; int idx; if (!fb) return; - if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &clip)) + if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip)) + return; + + dst_clip = plane_state->dst; + if 
(!drm_rect_intersect(&dst_clip, &src_clip)) return; if (!drm_dev_enter(dev, &idx)) return; - drm_fb_blit_rect_dstclip(sdev->screen_base, sdev->pitch, - sdev->format->format, vmap, fb, &clip); + dst += drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip); + drm_fb_blit_toio(dst, sdev->pitch, sdev->format->format, vmap, fb, &src_clip); drm_dev_exit(idx); } @@ -758,6 +773,7 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) struct drm_display_mode *mode = &sdev->mode; struct drm_connector *connector = &sdev->connector; struct drm_simple_display_pipe *pipe = &sdev->pipe; + unsigned long max_width, max_height; const uint32_t *formats; size_t nformats; int ret; @@ -766,10 +782,13 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) if (ret) return ret; + max_width = max_t(unsigned long, mode->hdisplay, DRM_SHADOW_PLANE_MAX_WIDTH); + max_height = max_t(unsigned long, mode->vdisplay, DRM_SHADOW_PLANE_MAX_HEIGHT); + dev->mode_config.min_width = mode->hdisplay; - dev->mode_config.max_width = mode->hdisplay; + dev->mode_config.max_width = max_width; dev->mode_config.min_height = mode->vdisplay; - dev->mode_config.max_height = mode->vdisplay; + dev->mode_config.max_height = max_height; dev->mode_config.prefer_shadow_fbdev = true; dev->mode_config.preferred_depth = sdev->format->cpp[0] * 8; dev->mode_config.funcs = &simpledrm_mode_config_funcs; @@ -788,6 +807,8 @@ static int simpledrm_device_init_modeset(struct simpledrm_device *sdev) if (ret) return ret; + drm_plane_enable_fb_damage_clips(&pipe->plane); + drm_mode_config_reset(dev); return 0; diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index ad0faa8723c2..51b9b9fb3ead 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ b/drivers/gpu/drm/tiny/st7586.c @@ -73,7 +73,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr, if (!buf) return; - drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, clip); + drm_fb_xrgb8888_to_gray8(buf, 0, vaddr, fb, clip); src = buf; for (y = clip->y1; y < clip->y2; y++) { diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3934ee225c78..db3dc7ef5382 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -619,7 +619,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, *busy = !ret; } - if (ret && place && !bo->bdev->funcs->eviction_valuable(bo, place)) { + if (ret && place && (bo->resource->mem_type != place->mem_type || + !bo->bdev->funcs->eviction_valuable(bo, place))) { ret = false; if (*locked) { dma_resv_unlock(bo->base.resv); @@ -726,6 +727,8 @@ int ttm_mem_evict_first(struct ttm_device *bdev, ret = ttm_bo_evict(bo, ctx); if (locked) ttm_bo_unreserve(bo); + else + ttm_bo_move_to_lru_tail_unlocked(bo); ttm_bo_put(bo); return ret; @@ -1083,7 +1086,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, if (timeout == 0) return -EBUSY; - dma_resv_add_excl_fence(bo->base.resv, NULL); return 0; } EXPORT_SYMBOL(ttm_bo_wait); @@ -1102,7 +1104,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, * as an indication that we're about to swap out. 
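Two related TTM eviction changes appear here. First, ttm_bo_evict_swapout_allowable() now refuses buffers whose resource no longer lives in the memory type named by the requested place; second, ttm_bo_swapout() (continuing below) probes with the buffer's current mem_type and only switches the placement to TTM_PL_SYSTEM when allocating the hop resource. A sketch of the tightened candidate check, simplified and with an invented name:

static bool evict_allowable_sketch(struct ttm_buffer_object *bo,
                                   const struct ttm_place *place)
{
        /* A BO that has already moved out of the targeted memory
         * type is not a useful eviction candidate. */
        if (bo->resource->mem_type != place->mem_type)
                return false;

        return bo->bdev->funcs->eviction_valuable(bo, place);
}

The companion hunk in ttm_mem_evict_first() also rotates buffers that could not be reserved to the LRU tail via ttm_bo_move_to_lru_tail_unlocked(), so the same busy buffer is not retried first on every pass.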
*/ memset(&place, 0, sizeof(place)); - place.mem_type = TTM_PL_SYSTEM; + place.mem_type = bo->resource->mem_type; if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL)) return -EBUSY; @@ -1134,6 +1136,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, struct ttm_place hop; memset(&hop, 0, sizeof(hop)); + place.mem_type = TTM_PL_SYSTEM; ret = ttm_resource_alloc(bo, &place, &evict_mem); if (unlikely(ret)) goto out; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 82af095f6b81..72a94301bc95 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -78,22 +78,21 @@ void ttm_mem_io_free(struct ttm_device *bdev, /** * ttm_move_memcpy - Helper to perform a memcpy ttm move operation. - * @bo: The struct ttm_buffer_object. - * @new_mem: The struct ttm_resource we're moving to (copy destination). - * @new_iter: A struct ttm_kmap_iter representing the destination resource. + * @clear: Whether to clear rather than copy. + * @num_pages: Number of pages of the operation. + * @dst_iter: A struct ttm_kmap_iter representing the destination resource. * @src_iter: A struct ttm_kmap_iter representing the source resource. * * This function is intended to be able to move out async under a * dma-fence if desired. */ -void ttm_move_memcpy(struct ttm_buffer_object *bo, +void ttm_move_memcpy(bool clear, u32 num_pages, struct ttm_kmap_iter *dst_iter, struct ttm_kmap_iter *src_iter) { const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops; const struct ttm_kmap_iter_ops *src_ops = src_iter->ops; - struct ttm_tt *ttm = bo->ttm; struct dma_buf_map src_map, dst_map; pgoff_t i; @@ -102,10 +101,7 @@ void ttm_move_memcpy(struct ttm_buffer_object *bo, return; /* Don't move nonexistent data. Clear destination instead. */ - if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) { - if (ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)) - return; - + if (clear) { for (i = 0; i < num_pages; ++i) { dst_ops->map_local(dst_iter, &dst_map, i); if (dst_map.is_iomem) @@ -148,6 +144,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_kmap_iter_linear_io io; } _dst_iter, _src_iter; struct ttm_kmap_iter *dst_iter, *src_iter; + bool clear; int ret = 0; if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || @@ -171,7 +168,9 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, goto out_src_iter; } - ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter); + clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm)); + if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) + ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter); if (!src_iter->ops->maps_tt) ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem); @@ -190,6 +189,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo) struct ttm_transfer_obj *fbo; fbo = container_of(bo, struct ttm_transfer_obj, base); + dma_resv_fini(&fbo->base.base._resv); ttm_bo_put(fbo->bo); kfree(fbo); } diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 67d68a4a8640..072e0baf2ab4 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -128,15 +128,17 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = { }; /** - * ttm_range_man_init + * ttm_range_man_init_nocheck - Initialise a generic range manager for the + * selected memory type. 
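Before the kernel-doc fixes, note what the ttm_bo_util.c hunk above does: ttm_move_memcpy() drops its ttm_buffer_object argument, and the decision between clearing and copying moves into the caller, keeping the helper free of buffer-object knowledge so it can eventually run asynchronously under a dma-fence as its kernel-doc promises. The caller-side logic, lifted from the hunk:

        /* 'clear' means the source maps a TT with no real data, so the
         * destination is zeroed instead of copied; the call is skipped
         * entirely when the TT was allocated pre-zeroed. */
        clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
                ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);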
* * @bdev: ttm device * @type: memory manager type * @use_tt: if the memory manager uses tt * @p_size: size of area to be managed in pages. * - * Initialise a generic range manager for the selected memory type. * The range manager is installed for this device in the type slot. + * + * Return: %0 on success or a negative error code on failure */ int ttm_range_man_init_nocheck(struct ttm_device *bdev, unsigned type, bool use_tt, @@ -166,12 +168,13 @@ int ttm_range_man_init_nocheck(struct ttm_device *bdev, EXPORT_SYMBOL(ttm_range_man_init_nocheck); /** - * ttm_range_man_fini + * ttm_range_man_fini_nocheck - Remove the generic range manager from a slot + * and tear it down. * * @bdev: ttm device * @type: memory manager type * - * Remove the generic range manager from a slot and tear it down. + * Return: %0 on success or a negative error code on failure */ int ttm_range_man_fini_nocheck(struct ttm_device *bdev, unsigned type) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7e83c00a3f48..79c870a3bef8 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -34,6 +34,7 @@ #include <linux/sched.h> #include <linux/shmem_fs.h> #include <linux/file.h> +#include <linux/module.h> #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> diff --git a/drivers/gpu/drm/tve200/Kconfig b/drivers/gpu/drm/tve200/Kconfig index e2d163c74ed6..47a7dbe6c114 100644 --- a/drivers/gpu/drm/tve200/Kconfig +++ b/drivers/gpu/drm/tve200/Kconfig @@ -8,7 +8,6 @@ config DRM_TVE200 select DRM_BRIDGE select DRM_PANEL_BRIDGE select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE help diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 6a8731ab9d7d..6e3113f419f4 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -47,18 +47,18 @@ void v3d_free_object(struct drm_gem_object *obj) /* GPU execution may have dirtied any pages in the BO. */ bo->base.pages_mark_dirty_on_put = true; - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&bo->base); } static const struct drm_gem_object_funcs v3d_gem_funcs = { .free = v3d_free_object, - .print_info = drm_gem_shmem_print_info, - .pin = drm_gem_shmem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .print_info = drm_gem_shmem_object_print_info, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; /* gem_create_object function for allocating a BO struct and doing @@ -70,11 +70,11 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size) struct drm_gem_object *obj; if (size == 0) - return NULL; + return ERR_PTR(-EINVAL); bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) - return NULL; + return ERR_PTR(-ENOMEM); obj = &bo->base.base; obj->funcs = &v3d_gem_funcs; @@ -95,7 +95,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) /* So far we pin the BO in the MMU for its lifetime, so use * shmem's helper for getting a lifetime sgt. 
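The v3d hunks follow the shmem helper rework visible throughout this merge: the helpers now take a struct drm_gem_shmem_object directly (drm_gem_shmem_free(), drm_gem_shmem_get_pages_sgt() on &bo->base), and drm_gem_object_funcs tables switch to drm_gem_shmem_object_*() wrappers that do the container_of() conversion internally. As a sketch, a driver with no teardown of its own could wire the whole table to the wrappers; v3d keeps its own .free callback and calls drm_gem_shmem_free() from it:

static const struct drm_gem_object_funcs sketch_gem_funcs = {
        .free           = drm_gem_shmem_object_free,
        .print_info     = drm_gem_shmem_object_print_info,
        .pin            = drm_gem_shmem_object_pin,
        .unpin          = drm_gem_shmem_object_unpin,
        .get_sg_table   = drm_gem_shmem_object_get_sg_table,
        .vmap           = drm_gem_shmem_object_vmap,
        .vunmap         = drm_gem_shmem_object_vunmap,
        .mmap           = drm_gem_shmem_object_mmap,
};

In the same series v3d_create_object() starts returning ERR_PTR(-EINVAL) and ERR_PTR(-ENOMEM) instead of NULL, which is what callers of the gem_create_object hook expect.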
*/ - sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base); + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); if (IS_ERR(sgt)) return PTR_ERR(sgt); @@ -141,7 +141,7 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, return bo; free_obj: - drm_gem_shmem_free_object(&shmem_obj->base); + drm_gem_shmem_free(shmem_obj); return ERR_PTR(ret); } @@ -159,7 +159,7 @@ v3d_prime_import_sg_table(struct drm_device *dev, ret = v3d_bo_create_finish(obj); if (ret) { - drm_gem_shmem_free_object(obj); + drm_gem_shmem_free(&to_v3d_bo(obj)->base); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index e47ae40a865a..c7ed2e1cbab6 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -774,7 +774,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, 0, V3D_CACHE_CLEAN); + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); if (ret) goto fail; @@ -1007,7 +1007,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, goto fail; ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job), - v3d_job_free, 0, 0, V3D_CACHE_CLEAN); + v3d_job_free, 0, NULL, V3D_CACHE_CLEAN); if (ret) goto fail; diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index a6c81af37345..f35d9e44c6b7 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -7,7 +7,6 @@ * Michael Thayer <michael.thayer@oracle.com, * Hans de Goede <hdegoede@redhat.com> */ -#include <linux/console.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/vt_kern.h> @@ -193,10 +192,8 @@ static const struct drm_driver driver = { static int __init vbox_init(void) { -#ifdef CONFIG_VGA_CONSOLE - if (vgacon_text_force() && vbox_modeset == -1) + if (drm_firmware_drivers_only() && vbox_modeset == -1) return -EINVAL; -#endif if (vbox_modeset == 0) return -EINVAL; diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index f28779715ccd..c9e8b3a63c62 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -127,8 +127,8 @@ int vbox_hw_init(struct vbox_private *vbox) /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1, "vboxvideo-accel"); - if (!vbox->guest_pool) - return -ENOMEM; + if (IS_ERR(vbox->guest_pool)) + return PTR_ERR(vbox->guest_pool); ret = gen_pool_add_virt(vbox->guest_pool, (unsigned long)vbox->guest_heap, diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig index 345a5570a3da..de3424fed2fc 100644 --- a/drivers/gpu/drm/vc4/Kconfig +++ b/drivers/gpu/drm/vc4/Kconfig @@ -6,7 +6,6 @@ config DRM_VC4 depends on SND && SND_SOC depends on COMMON_CLK select DRM_KMS_HELPER - select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER select DRM_PANEL_BRIDGE select SND_PCM diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index fddaeb0b09c1..6d1281a343e9 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -177,7 +177,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) bo->validated_shader = NULL; } - drm_gem_cma_free_object(obj); + drm_gem_cma_free(&bo->base); } static void vc4_bo_remove_from_cache(struct vc4_bo *bo) @@ -720,7 +720,7 @@ static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct return 
-EINVAL; } - return drm_gem_cma_mmap(obj, vma); + return drm_gem_cma_mmap(&bo->base, vma); } static const struct vm_operations_struct vc4_vm_ops = { @@ -732,8 +732,8 @@ static const struct vm_operations_struct vc4_vm_ops = { static const struct drm_gem_object_funcs vc4_gem_object_funcs = { .free = vc4_free_object, .export = vc4_prime_export, - .get_sg_table = drm_gem_cma_get_sg_table, - .vmap = drm_gem_cma_vmap, + .get_sg_table = drm_gem_cma_object_get_sg_table, + .vmap = drm_gem_cma_object_vmap, .mmap = vc4_gem_object_mmap, .vm_ops = &vc4_vm_ops, }; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 18f5009ce90e..287dbc89ad64 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -32,6 +32,7 @@ #include <linux/clk.h> #include <linux/component.h> #include <linux/of_device.h> +#include <linux/pm_runtime.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -42,6 +43,7 @@ #include <drm/drm_vblank.h> #include "vc4_drv.h" +#include "vc4_hdmi.h" #include "vc4_regs.h" #define HVS_FIFO_LATENCY_PIX 6 @@ -279,27 +281,15 @@ static u32 vc4_crtc_get_fifo_full_level_bits(struct vc4_crtc *vc4_crtc, * allows drivers to push pixels to more than one encoder from the * same CRTC. */ -static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc, - struct drm_atomic_state *state, - struct drm_connector_state *(*get_state)(struct drm_atomic_state *state, - struct drm_connector *connector)) +struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc, + struct drm_crtc_state *state) { - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; + struct drm_encoder *encoder; - drm_connector_list_iter_begin(crtc->dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct drm_connector_state *conn_state = get_state(state, connector); + WARN_ON(hweight32(state->encoder_mask) > 1); - if (!conn_state) - continue; - - if (conn_state->crtc == crtc) { - drm_connector_list_iter_end(&conn_iter); - return connector->encoder; - } - } - drm_connector_list_iter_end(&conn_iter); + drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) + return encoder; return NULL; } @@ -313,12 +303,11 @@ static void vc4_crtc_pixelvalve_reset(struct drm_crtc *crtc) CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_FIFO_CLR); } -static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_atomic_state *state) +static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state, - drm_atomic_get_new_connector_state); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc); @@ -496,8 +485,10 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) enum vc4_encoder_type encoder_type; const struct vc4_pv_data *pv_data; struct drm_encoder *encoder; + struct vc4_hdmi *vc4_hdmi; unsigned encoder_sel; int channel; + int ret; if (!(of_device_is_compatible(vc4_crtc->pdev->dev.of_node, "brcm,bcm2711-pixelvalve2") || @@ -525,7 +516,20 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) if (WARN_ON(!encoder)) return 0; - return vc4_crtc_disable(crtc, encoder, NULL, channel); + vc4_hdmi = encoder_to_vc4_hdmi(encoder); + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret) + return ret; + 
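The rewritten vc4_get_crtc_encoder() above is the key simplification in this file: rather than iterating every connector under the iterator lock and matching conn_state->crtc, it reads the encoder_mask that the atomic core already maintains in the CRTC state, and callers pass whichever old or new state they are operating on. The resulting helper, reproduced here because the shape is broadly reusable:

struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc,
                                         struct drm_crtc_state *state)
{
        struct drm_encoder *encoder;

        /* A vc4 CRTC drives at most one encoder at a time. */
        WARN_ON(hweight32(state->encoder_mask) > 1);

        drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask)
                return encoder;

        return NULL;
}

Dropping the get_state function-pointer parameter falls out naturally, since each caller now resolves the state itself.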
+ ret = vc4_crtc_disable(crtc, encoder, NULL, channel); + if (ret) + return ret; + + ret = pm_runtime_put(&vc4_hdmi->pdev->dev); + if (ret) + return ret; + + return 0; } static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, @@ -534,10 +538,12 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state); - struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state, - drm_atomic_get_old_connector_state); + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, old_state); struct drm_device *dev = crtc->dev; + drm_dbg(dev, "Disabling CRTC %s (%u) connected to Encoder %s (%u)", + crtc->name, crtc->base.id, encoder->name, encoder->base.id); + require_hvs_enabled(dev); /* Disable vblank irq handling before crtc is disabled. */ @@ -562,12 +568,16 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, + crtc); struct drm_device *dev = crtc->dev; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, state, - drm_atomic_get_new_connector_state); + struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc, new_state); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); + drm_dbg(dev, "Enabling CRTC %s (%u) connected to Encoder %s (%u)", + crtc->name, crtc->base.id, encoder->name, encoder->base.id); + require_hvs_enabled(dev); /* Enable vblank irq handling before crtc is started otherwise @@ -580,7 +590,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, if (vc4_encoder->pre_crtc_configure) vc4_encoder->pre_crtc_configure(encoder, state); - vc4_crtc_config_pv(crtc, state); + vc4_crtc_config_pv(crtc, encoder, state); CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN); @@ -649,12 +659,27 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct drm_connector *conn; struct drm_connector_state *conn_state; + struct drm_encoder *encoder; int ret, i; ret = vc4_hvs_atomic_check(crtc, state); if (ret) return ret; + encoder = vc4_get_crtc_encoder(crtc, crtc_state); + if (encoder) { + const struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); + + mode = &crtc_state->adjusted_mode; + if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) { + vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000, + mode->clock * 9 / 10) * 1000; + } else { + vc4_state->hvs_load = mode->clock * 1000; + } + } + for_each_new_connector_in_state(state, conn, conn_state, i) { if (conn_state->crtc != crtc) @@ -691,14 +716,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) struct drm_crtc *crtc = &vc4_crtc->base; struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); - u32 chan = vc4_state->assigned_channel; + u32 chan = vc4_crtc->current_hvs_channel; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); + spin_lock(&vc4_crtc->irq_lock); if (vc4_crtc->event && - (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) || - vc4_state->feed_txp)) { + (vc4_crtc->current_dlist == HVS_READ(SCALER_DISPLACTX(chan)) || + vc4_crtc->feeds_txp)) { 
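The hvs_load estimate added to vc4_crtc_atomic_check() above is easiest to read with numbers. For the HDMI0 encoder the load is max(clock * hdisplay / htotal + 1000, clock * 9 / 10) * 1000; assuming the standard CEA 1080p60 timing as an example input (pixel clock 148500 kHz, hdisplay 1920, htotal 2200), the active-pixel term gives 148500 * 1920 / 2200 + 1000 = 130600 while the 90% floor gives 148500 * 9 / 10 = 133650, so the floor wins and hvs_load becomes 133650000, about 133.65 MHz of HVS throughput. Every other encoder type is simply charged the full pixel clock, clock * 1000.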
drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; drm_crtc_vblank_put(crtc); @@ -711,6 +736,7 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) */ vc4_hvs_unmask_underrun(dev, chan); } + spin_unlock(&vc4_crtc->irq_lock); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -876,7 +902,6 @@ struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) return NULL; old_vc4_state = to_vc4_crtc_state(crtc->state); - vc4_state->feed_txp = old_vc4_state->feed_txp; vc4_state->margins = old_vc4_state->margins; vc4_state->assigned_channel = old_vc4_state->assigned_channel; @@ -937,6 +962,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { .mode_valid = vc4_crtc_mode_valid, .atomic_check = vc4_crtc_atomic_check, + .atomic_begin = vc4_hvs_atomic_begin, .atomic_flush = vc4_hvs_atomic_flush, .atomic_enable = vc4_crtc_atomic_enable, .atomic_disable = vc4_crtc_atomic_disable, @@ -1111,6 +1137,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, return PTR_ERR(primary_plane); } + spin_lock_init(&vc4_crtc->irq_lock); drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, crtc_funcs, NULL); drm_crtc_helper_add(crtc, crtc_helper_funcs); diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c index 6da22af4ee91..ba2d8ea562af 100644 --- a/drivers/gpu/drm/vc4/vc4_debugfs.c +++ b/drivers/gpu/drm/vc4/vc4_debugfs.c @@ -7,6 +7,7 @@ #include <linux/circ_buf.h> #include <linux/ctype.h> #include <linux/debugfs.h> +#include <linux/platform_device.h> #include "vc4_drv.h" #include "vc4_regs.h" @@ -26,8 +27,10 @@ vc4_debugfs_init(struct drm_minor *minor) struct vc4_dev *vc4 = to_vc4_dev(minor->dev); struct vc4_debugfs_info_entry *entry; - debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, - minor->debugfs_root, &vc4->load_tracker_enabled); + if (!of_device_is_compatible(vc4->hvs->pdev->dev.of_node, + "brcm,bcm2711-vc5")) + debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, + minor->debugfs_root, &vc4->load_tracker_enabled); list_for_each_entry(entry, &vc4->debugfs_list, link) { drm_debugfs_create_files(&entry->info, 1, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index ef73e0aaf726..4329e09d357c 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -202,9 +202,6 @@ struct vc4_dev { int power_refcount; - /* Set to true when the load tracker is supported. */ - bool load_tracker_available; - /* Set to true when the load tracker is active. */ bool load_tracker_enabled; @@ -495,6 +492,33 @@ struct vc4_crtc { struct drm_pending_vblank_event *event; struct debugfs_regset32 regset; + + /** + * @feeds_txp: True if the CRTC feeds our writeback controller. + */ + bool feeds_txp; + + /** + * @irq_lock: Spinlock protecting the resources shared between + * the atomic code and our vblank handler. + */ + spinlock_t irq_lock; + + /** + * @current_dlist: Start offset of the display list currently + * set in the HVS for that CRTC. Protected by @irq_lock, and + * copied in vc4_hvs_update_dlist() for the CRTC interrupt + * handler to have access to that value. + */ + unsigned int current_dlist; + + /** + * @current_hvs_channel: HVS channel currently assigned to the + * CRTC. Protected by @irq_lock, and copied in + * vc4_hvs_atomic_begin() for the CRTC interrupt handler to have + * access to that value. 
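These fields formalise a producer/consumer handoff: the commit path caches the active display-list offset and HVS channel under irq_lock, and the vblank handler reads only those copies instead of dereferencing crtc->state, which the atomic machinery may be swapping out concurrently. A minimal consumer-side sketch (illustrative name; the real handler also holds dev->event_lock):

static void page_flip_sketch(struct vc4_crtc *vc4_crtc)
{
        unsigned int chan, dlist;

        /* Plain spin_lock() suffices: this runs from the CRTC
         * interrupt handler with interrupts already disabled. */
        spin_lock(&vc4_crtc->irq_lock);
        chan = vc4_crtc->current_hvs_channel;
        dlist = vc4_crtc->current_dlist;
        spin_unlock(&vc4_crtc->irq_lock);

        /* ... compare dlist against HVS_READ(SCALER_DISPLACTX(chan)) ... */
}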
+ */ + unsigned int current_hvs_channel; }; static inline struct vc4_crtc * @@ -517,11 +541,13 @@ vc4_crtc_to_vc4_pv_data(const struct vc4_crtc *crtc) return container_of(data, struct vc4_pv_data, base); } +struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc, + struct drm_crtc_state *state); + struct vc4_crtc_state { struct drm_crtc_state base; /* Dlist area for this CRTC configuration. */ struct drm_mm_node mm; - bool feed_txp; bool txp_armed; unsigned int assigned_channel; @@ -532,6 +558,8 @@ struct vc4_crtc_state { unsigned int bottom; } margins; + unsigned long hvs_load; + /* Transitional state below, only valid during atomic commits */ bool update_muxing; }; @@ -908,6 +936,7 @@ extern struct platform_driver vc4_hvs_driver; void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output); int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output); int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state); +void vc4_hvs_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state); void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state); void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state); void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 01402a2c9358..053fbaf765ca 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -94,6 +94,7 @@ # define VC4_HD_M_SW_RST BIT(2) # define VC4_HD_M_ENABLE BIT(0) +#define HSM_MIN_CLOCK_FREQ 120000000 #define CEC_CLOCK_FREQ 40000 #define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000) @@ -117,6 +118,10 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_M_CTL, VC4_HD_M_SW_RST); udelay(1); HDMI_WRITE(HDMI_M_CTL, 0); @@ -128,24 +133,36 @@ static void vc4_hdmi_reset(struct vc4_hdmi *vc4_hdmi) VC4_HDMI_SW_RESET_FORMAT_DETECT); HDMI_WRITE(HDMI_SW_RESET_CONTROL, 0); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc5_hdmi_reset(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + reset_control_reset(vc4_hdmi->reset); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_DVP_CTL, 0); HDMI_WRITE(HDMI_CLOCK_STOP, HDMI_READ(HDMI_CLOCK_STOP) | VC4_DVP_HT_CLOCK_STOP_PIXEL); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } #ifdef CONFIG_DRM_VC4_HDMI_CEC static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) { + unsigned long cec_rate = clk_get_rate(vc4_hdmi->cec_clock); + unsigned long flags; u16 clk_cnt; u32 value; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + value = HDMI_READ(HDMI_CEC_CNTRL_1); value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; @@ -153,29 +170,41 @@ static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) * Set the clock divider: the hsm_clock rate and this divider * setting will give a 40 kHz CEC clock. 
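The divider comment is easy to verify: clk_cnt = cec_rate / CEC_CLOCK_FREQ, so if the CEC parent clock runs at the 120 MHz floor that HSM_MIN_CLOCK_FREQ now names (an assumption; the code uses whatever clk_get_rate() reports), 120000000 / 40000 = 3000, and VC4_HDMI_CEC_DIV_CLK_CNT is programmed to 3000 to derive the 40 kHz CEC tick. Hoisting clk_get_rate() into the cec_rate local before taking hw_lock also keeps a potentially sleeping clock call out of the spinlocked region, which is the point of this hunk.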
*/ - clk_cnt = clk_get_rate(vc4_hdmi->cec_clock) / CEC_CLOCK_FREQ; + clk_cnt = cec_rate / CEC_CLOCK_FREQ; value |= clk_cnt << VC4_HDMI_CEC_DIV_CLK_CNT_SHIFT; HDMI_WRITE(HDMI_CEC_CNTRL_1, value); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } #else static void vc4_hdmi_cec_update_clk_div(struct vc4_hdmi *vc4_hdmi) {} #endif +static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder); + static enum drm_connector_status vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) { struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); bool connected = false; + mutex_lock(&vc4_hdmi->mutex); + WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); - if (vc4_hdmi->hpd_gpio && - gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) { - connected = true; - } else if (drm_probe_ddc(vc4_hdmi->ddc)) { - connected = true; - } else if (HDMI_READ(HDMI_HOTPLUG) & VC4_HDMI_HOTPLUG_CONNECTED) { - connected = true; + if (vc4_hdmi->hpd_gpio) { + if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) + connected = true; + } else { + unsigned long flags; + u32 hotplug; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + hotplug = HDMI_READ(HDMI_HOTPLUG); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED) + connected = true; } if (connected) { @@ -189,12 +218,15 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) } } + vc4_hdmi_enable_scrambling(&vc4_hdmi->encoder.base.base); pm_runtime_put(&vc4_hdmi->pdev->dev); + mutex_unlock(&vc4_hdmi->mutex); return connector_status_connected; } cec_phys_addr_invalidate(vc4_hdmi->cec_adap); pm_runtime_put(&vc4_hdmi->pdev->dev); + mutex_unlock(&vc4_hdmi->mutex); return connector_status_disconnected; } @@ -211,10 +243,14 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) int ret = 0; struct edid *edid; + mutex_lock(&vc4_hdmi->mutex); + edid = drm_get_edid(connector, vc4_hdmi->ddc); cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid); - if (!edid) - return -ENODEV; + if (!edid) { + ret = -ENODEV; + goto out; + } vc4_encoder->hdmi_monitor = drm_detect_hdmi_monitor(edid); @@ -234,6 +270,9 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) } } +out: + mutex_unlock(&vc4_hdmi->mutex); + return ret; } @@ -368,9 +407,12 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); u32 packet_id = type - 0x80; + unsigned long flags; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id)); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); if (!poll) return 0; @@ -390,6 +432,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, void __iomem *base = __vc4_hdmi_get_field_base(vc4_hdmi, ram_packet_start->reg); uint8_t buffer[VC4_HDMI_PACKET_STRIDE]; + unsigned long flags; ssize_t len, i; int ret; @@ -407,6 +450,8 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, return; } + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + for (i = 0; i < len; i += 7) { writel(buffer[i + 0] << 0 | buffer[i + 1] << 8 | @@ -424,6 +469,9 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, HDMI_READ(HDMI_RAM_PACKET_CONFIG) | BIT(packet_id)); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for((HDMI_READ(HDMI_RAM_PACKET_STATUS) & BIT(packet_id)), 100); if (ret) @@ -436,11 +484,12 @@ static void 
vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct drm_connector *connector = &vc4_hdmi->connector; struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = cstate->crtc; - const struct drm_display_mode *mode = &crtc->state->adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; union hdmi_infoframe frame; int ret; + lockdep_assert_held(&vc4_hdmi->mutex); + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, connector, mode); if (ret < 0) { @@ -492,6 +541,8 @@ static void vc4_hdmi_set_hdr_infoframe(struct drm_encoder *encoder) struct drm_connector_state *conn_state = connector->state; union hdmi_infoframe frame; + lockdep_assert_held(&vc4_hdmi->mutex); + if (!vc4_hdmi->variant->supports_hdr) return; @@ -508,6 +559,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + lockdep_assert_held(&vc4_hdmi->mutex); + vc4_hdmi_set_avi_infoframe(encoder); vc4_hdmi_set_spd_infoframe(encoder); /* @@ -527,6 +580,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_display_info *display = &vc4_hdmi->connector.display_info; + lockdep_assert_held(&vc4_hdmi->mutex); + if (!vc4_encoder->hdmi_monitor) return false; @@ -542,10 +597,10 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = cstate->crtc; - struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + unsigned long flags; + + lockdep_assert_held(&vc4_hdmi->mutex); if (!vc4_hdmi_supports_scrambling(encoder, mode)) return; @@ -556,8 +611,12 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, true); drm_scdc_set_scrambling(vc4_hdmi->ddc, true); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) | VC5_HDMI_SCRAMBLER_CTL_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + vc4_hdmi->scdc_enabled = true; queue_delayed_work(system_wq, &vc4_hdmi->scrambling_work, msecs_to_jiffies(SCRAMBLING_POLLING_DELAY_MS)); @@ -566,25 +625,22 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_connector_state *cstate = connector->state; + unsigned long flags; - /* - * At boot, connector->state will be NULL. Since we don't know the - * state of the scrambler and in order to avoid any - * inconsistency, let's disable it all the time. 
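With the scdc_enabled flag (set at the end of the enable path above, consumed in the disable path continuing below), the driver stops inferring scrambling state from connector->state, which the deleted comment concedes is NULL at boot, and instead tracks what it last programmed. A sketch of the guarded teardown under the new locking rules, with an invented name:

static void disable_scrambling_sketch(struct vc4_hdmi *vc4_hdmi)
{
        lockdep_assert_held(&vc4_hdmi->mutex);

        /* Idempotent: nothing to undo unless enable actually ran. */
        if (!vc4_hdmi->scdc_enabled)
                return;

        vc4_hdmi->scdc_enabled = false;

        /* ... cancel the polling work, clear SCRAMBLER_CTL under
         * hw_lock, then drop the SCDC settings over DDC ... */
}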
- */ - if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode)) - return; + lockdep_assert_held(&vc4_hdmi->mutex); - if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode)) + if (!vc4_hdmi->scdc_enabled) return; + vc4_hdmi->scdc_enabled = false; + if (delayed_work_pending(&vc4_hdmi->scrambling_work)) cancel_delayed_work_sync(&vc4_hdmi->scrambling_work); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_SCRAMBLER_CTL, HDMI_READ(HDMI_SCRAMBLER_CTL) & ~VC5_HDMI_SCRAMBLER_CTL_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); drm_scdc_set_scrambling(vc4_hdmi->ddc, false); drm_scdc_set_high_tmds_clock_ratio(vc4_hdmi->ddc, false); @@ -610,26 +666,43 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, 0); HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_CLRRGB); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + mdelay(1); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) & ~VC4_HD_VID_CTL_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + vc4_hdmi_disable_scrambling(encoder); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + unsigned long flags; int ret; + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_VID_CTL, HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); if (vc4_hdmi->variant->phy_disable) vc4_hdmi->variant->phy_disable(vc4_hdmi); @@ -640,16 +713,26 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder, ret = pm_runtime_put(&vc4_hdmi->pdev->dev); if (ret < 0) DRM_ERROR("Failed to release power domain: %d\n", ret); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_disable(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + mutex_lock(&vc4_hdmi->mutex); + vc4_hdmi->output_enabled = false; + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) { + unsigned long flags; u32 csc_ctl; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + csc_ctl = VC4_SET_FIELD(VC4_HD_CSC_CTL_ORDER_BGR, VC4_HD_CSC_CTL_ORDER); @@ -679,14 +762,19 @@ static void vc4_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) /* The RGB order applies even when CSC is disabled. */ HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) { + unsigned long flags; u32 csc_ctl; csc_ctl = 0x07; /* RGB_CONVERT_MODE = custom matrix, || USE_RGB_TO_YCBCR */ + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + if (enable) { /* CEA VICs other than #1 requre limited range RGB * output unless overridden by an AVI infoframe. 
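The pattern repeated across all of these vc4_hdmi hunks is a two-level locking scheme: vc4_hdmi->mutex serialises whole encoder operations, which may sleep in clock, runtime-PM or DDC calls, while the hw_lock spinlock is held only across the short register read-modify-write windows that the interrupt path can also reach. A condensed sketch of the shape every converted function follows (illustrative body, not any specific function):

static void encoder_op_sketch(struct vc4_hdmi *vc4_hdmi)
{
        unsigned long flags;

        mutex_lock(&vc4_hdmi->mutex);

        /* Sleeping work goes here: clk_set_rate(), pm_runtime_*(),
         * DDC transfers, drm_scdc_*() ... */

        spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
        /* HDMI_READ()/HDMI_WRITE() sequence only; never sleep here. */
        spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);

        mutex_unlock(&vc4_hdmi->mutex);
}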
@@ -718,6 +806,8 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) } HDMI_WRITE(HDMI_CSC_CTL, csc_ctl); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, @@ -741,6 +831,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, mode->crtc_vsync_end - interlaced, VC4_HDMI_VERTB_VBP)); + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_HORZA, (vsync_pos ? VC4_HDMI_HORZA_VPOS : 0) | @@ -764,6 +857,8 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, HDMI_WRITE(HDMI_VERTB0, vertb_even); HDMI_WRITE(HDMI_VERTB1, vertb); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, @@ -787,10 +882,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, mode->crtc_vsync_end - interlaced, VC4_HDMI_VERTB_VBP)); + unsigned long flags; unsigned char gcp; bool gcp_en; u32 reg; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021); HDMI_WRITE(HDMI_HORZA, (vsync_pos ? VC5_HDMI_HORZA_VPOS : 0) | @@ -849,13 +947,18 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, HDMI_WRITE(HDMI_GCP_CONFIG, reg); HDMI_WRITE(HDMI_CLOCK_STOP, 0); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; u32 drift; int ret; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + drift = HDMI_READ(HDMI_FIFO_CTL); drift &= VC4_HDMI_FIFO_VALID_WRITE_MASK; @@ -863,12 +966,20 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) drift & ~VC4_HDMI_FIFO_CTL_RECENTER); HDMI_WRITE(HDMI_FIFO_CTL, drift | VC4_HDMI_FIFO_CTL_RECENTER); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + usleep_range(1000, 1100); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_FIFO_CTL, drift & ~VC4_HDMI_FIFO_CTL_RECENTER); HDMI_WRITE(HDMI_FIFO_CTL, drift | VC4_HDMI_FIFO_CTL_RECENTER); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for(HDMI_READ(HDMI_FIFO_CTL) & VC4_HDMI_FIFO_CTL_RECENTER_DONE, 1); WARN_ONCE(ret, "Timeout waiting for " @@ -898,31 +1009,14 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, vc4_hdmi_encoder_get_connector_state(encoder, state); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - unsigned long bvb_rate, pixel_rate, hsm_rate; + struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + unsigned long pixel_rate = vc4_conn_state->pixel_rate; + unsigned long bvb_rate, hsm_rate; + unsigned long flags; int ret; - ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); - if (ret < 0) { - DRM_ERROR("Failed to retain power domain: %d\n", ret); - return; - } - - pixel_rate = vc4_conn_state->pixel_rate; - ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate); - if (ret) { - DRM_ERROR("Failed to set pixel clock rate: %d\n", ret); - return; - } - - ret = clk_prepare_enable(vc4_hdmi->pixel_clock); - if (ret) { - DRM_ERROR("Failed to turn on pixel clock: %d\n", ret); - return; - } + mutex_lock(&vc4_hdmi->mutex); /* * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must @@ -944,9 +1038,28 @@ static void 
vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate); if (ret) { DRM_ERROR("Failed to set HSM clock rate: %d\n", ret); - return; + goto out; + } + + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret < 0) { + DRM_ERROR("Failed to retain power domain: %d\n", ret); + goto out; } + ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate); + if (ret) { + DRM_ERROR("Failed to set pixel clock rate: %d\n", ret); + goto err_put_runtime_pm; + } + + ret = clk_prepare_enable(vc4_hdmi->pixel_clock); + if (ret) { + DRM_ERROR("Failed to turn on pixel clock: %d\n", ret); + goto err_put_runtime_pm; + } + + vc4_hdmi_cec_update_clk_div(vc4_hdmi); if (pixel_rate > 297000000) @@ -959,39 +1072,52 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate); if (ret) { DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret); - clk_disable_unprepare(vc4_hdmi->pixel_clock); - return; + goto err_disable_pixel_clock; } ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); if (ret) { DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret); - clk_disable_unprepare(vc4_hdmi->pixel_clock); - return; + goto err_disable_pixel_clock; } if (vc4_hdmi->variant->phy_init) vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_SCHEDULER_CONTROL, HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MANUAL_FORMAT | VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + if (vc4_hdmi->variant->set_timings) vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode); + + mutex_unlock(&vc4_hdmi->mutex); + + return; + +err_disable_pixel_clock: + clk_disable_unprepare(vc4_hdmi->pixel_clock); +err_put_runtime_pm: + pm_runtime_put(&vc4_hdmi->pdev->dev); +out: + mutex_unlock(&vc4_hdmi->mutex); + return; } static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; - struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; + struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); if (vc4_encoder->hdmi_monitor && drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED) { @@ -1006,23 +1132,28 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, vc4_encoder->limited_rgb_range = false; } + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_display_mode 
*mode = &vc4_hdmi->saved_adjusted_mode; struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; bool vsync_pos = mode->flags & DRM_MODE_FLAG_PVSYNC; + unsigned long flags; int ret; + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_VID_CTL, VC4_HD_VID_CTL_ENABLE | VC4_HD_VID_CTL_CLRRGB | @@ -1039,6 +1170,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, HDMI_READ(HDMI_SCHEDULER_CONTROL) | VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE, 1000); WARN_ONCE(ret, "Timeout waiting for " @@ -1051,6 +1184,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, HDMI_READ(HDMI_SCHEDULER_CONTROL) & ~VC4_HDMI_SCHEDULER_CONTROL_MODE_HDMI); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + ret = wait_for(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE), 1000); WARN_ONCE(ret, "Timeout waiting for " @@ -1058,6 +1193,8 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, } if (vc4_encoder->hdmi_monitor) { + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + WARN_ON(!(HDMI_READ(HDMI_SCHEDULER_CONTROL) & VC4_HDMI_SCHEDULER_CONTROL_HDMI_ACTIVE)); HDMI_WRITE(HDMI_SCHEDULER_CONTROL, @@ -1067,15 +1204,37 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, VC4_HDMI_RAM_PACKET_ENABLE); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + vc4_hdmi_set_infoframes(encoder); } vc4_hdmi_recenter_fifo(vc4_hdmi); vc4_hdmi_enable_scrambling(encoder); + + mutex_unlock(&vc4_hdmi->mutex); } static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + mutex_lock(&vc4_hdmi->mutex); + vc4_hdmi->output_enabled = true; + mutex_unlock(&vc4_hdmi->mutex); +} + +static void vc4_hdmi_encoder_atomic_mode_set(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + + mutex_lock(&vc4_hdmi->mutex); + memcpy(&vc4_hdmi->saved_adjusted_mode, + &crtc_state->adjusted_mode, + sizeof(vc4_hdmi->saved_adjusted_mode)); + mutex_unlock(&vc4_hdmi->mutex); } #define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL @@ -1154,6 +1313,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = { .atomic_check = vc4_hdmi_encoder_atomic_check, + .atomic_mode_set = vc4_hdmi_encoder_atomic_mode_set, .mode_valid = vc4_hdmi_encoder_mode_valid, .disable = vc4_hdmi_encoder_disable, .enable = vc4_hdmi_encoder_enable, @@ -1188,6 +1348,7 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) { u32 hsm_clock = clk_get_rate(vc4_hdmi->audio_clock); + unsigned long flags; unsigned long n, m; rational_best_approximation(hsm_clock, samplerate, @@ -1197,19 +1358,22 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, VC4_HD_MAI_SMP_M_SHIFT) + 1, &n, &m); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_SMP, VC4_SET_FIELD(n, VC4_HD_MAI_SMP_N) | VC4_SET_FIELD(m - 1, VC4_HD_MAI_SMP_M)); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) { - struct 
drm_connector *connector = &vc4_hdmi->connector; - struct drm_crtc *crtc = connector->state->crtc; - const struct drm_display_mode *mode = &crtc->state->adjusted_mode; + const struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; u32 n, cts; u64 tmp; + lockdep_assert_held(&vc4_hdmi->mutex); + lockdep_assert_held(&vc4_hdmi->hw_lock); + n = 128 * samplerate / 1000; tmp = (u64)(mode->clock * 1000) * n; do_div(tmp, 128 * samplerate); @@ -1235,31 +1399,54 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) return snd_soc_card_get_drvdata(card); } -static int vc4_hdmi_audio_startup(struct device *dev, void *data) +static bool vc4_hdmi_audio_can_stream(struct vc4_hdmi *vc4_hdmi) { - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - struct drm_connector *connector = &vc4_hdmi->connector; + lockdep_assert_held(&vc4_hdmi->mutex); + + /* + * If the controller is disabled, prevent any ALSA output. + */ + if (!vc4_hdmi->output_enabled) + return false; /* - * If the HDMI encoder hasn't probed, or the encoder is - * currently in DVI mode, treat the codec dai as missing. + * If the encoder is currently in DVI mode, treat the codec DAI + * as missing. */ - if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & - VC4_HDMI_RAM_PACKET_ENABLE)) + if (!(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE)) + return false; + + return true; +} + +static int vc4_hdmi_audio_startup(struct device *dev, void *data) +{ + struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); + + if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { + mutex_unlock(&vc4_hdmi->mutex); return -ENODEV; + } vc4_hdmi->audio.streaming = true; + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET | VC4_HD_MAI_CTL_FLUSH | VC4_HD_MAI_CTL_DLATE | VC4_HD_MAI_CTL_ERRORE | VC4_HD_MAI_CTL_ERRORF); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); if (vc4_hdmi->variant->phy_rng_enable) vc4_hdmi->variant->phy_rng_enable(vc4_hdmi); + mutex_unlock(&vc4_hdmi->mutex); + return 0; } @@ -1267,32 +1454,48 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi) { struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; struct device *dev = &vc4_hdmi->pdev->dev; + unsigned long flags; int ret; + lockdep_assert_held(&vc4_hdmi->mutex); + vc4_hdmi->audio.streaming = false; ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false); if (ret) dev_err(dev, "Failed to stop audio infoframe: %d\n", ret); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_RESET); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_ERRORF); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_FLUSH); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static void vc4_hdmi_audio_shutdown(struct device *dev, void *data) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); + unsigned long flags; + + mutex_lock(&vc4_hdmi->mutex); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_CTL, VC4_HD_MAI_CTL_DLATE | VC4_HD_MAI_CTL_ERRORE | VC4_HD_MAI_CTL_ERRORF); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + if (vc4_hdmi->variant->phy_rng_disable) vc4_hdmi->variant->phy_rng_disable(vc4_hdmi); vc4_hdmi->audio.streaming = false; vc4_hdmi_audio_reset(vc4_hdmi); + + mutex_unlock(&vc4_hdmi->mutex); } static int sample_rate_to_mai_fmt(int samplerate) @@ -1342,6 +1545,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; unsigned int 
sample_rate = params->sample_rate; unsigned int channels = params->channels; + unsigned long flags; u32 audio_packet_config, channel_mask; u32 channel_map; u32 mai_audio_format; @@ -1350,14 +1554,22 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__, sample_rate, params->sample_width, channels); + mutex_lock(&vc4_hdmi->mutex); + + if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { + mutex_unlock(&vc4_hdmi->mutex); + return -EINVAL; + } + + vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate); + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_MAI_CTL, VC4_SET_FIELD(channels, VC4_HD_MAI_CTL_CHNUM) | VC4_HD_MAI_CTL_WHOLSMP | VC4_HD_MAI_CTL_CHALIGN | VC4_HD_MAI_CTL_ENABLE); - vc4_hdmi_audio_set_mai_clock(vc4_hdmi, sample_rate); - mai_sample_rate = sample_rate_to_mai_fmt(sample_rate); if (params->iec.status[0] & IEC958_AES0_NONAUDIO && params->channels == 8) @@ -1395,22 +1607,19 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, channel_map = vc4_hdmi->variant->channel_map(vc4_hdmi, channel_mask); HDMI_WRITE(HDMI_MAI_CHANNEL_MAP, channel_map); HDMI_WRITE(HDMI_AUDIO_PACKET_CONFIG, audio_packet_config); + vc4_hdmi_set_n_cts(vc4_hdmi, sample_rate); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + memcpy(&vc4_hdmi->audio.infoframe, ¶ms->cea, sizeof(params->cea)); vc4_hdmi_set_audio_infoframe(encoder); + mutex_unlock(&vc4_hdmi->mutex); + return 0; } -static const struct snd_soc_dapm_widget vc4_hdmi_audio_widgets[] = { - SND_SOC_DAPM_OUTPUT("TX"), -}; - -static const struct snd_soc_dapm_route vc4_hdmi_audio_routes[] = { - { "TX", NULL, "Playback" }, -}; - static const struct snd_soc_component_driver vc4_hdmi_audio_cpu_dai_comp = { .name = "vc4-hdmi-cpu-dai-component", }; @@ -1450,7 +1659,9 @@ static int vc4_hdmi_audio_get_eld(struct device *dev, void *data, struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); struct drm_connector *connector = &vc4_hdmi->connector; + mutex_lock(&vc4_hdmi->mutex); memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); + mutex_unlock(&vc4_hdmi->mutex); return 0; } @@ -1672,6 +1883,8 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) struct cec_msg *msg = &vc4_hdmi->cec_rx_msg; unsigned int i; + lockdep_assert_held(&vc4_hdmi->hw_lock); + msg->len = 1 + ((cntrl1 & VC4_HDMI_CEC_REC_WRD_CNT_MASK) >> VC4_HDMI_CEC_REC_WRD_CNT_SHIFT); @@ -1690,11 +1903,12 @@ static void vc4_cec_read_msg(struct vc4_hdmi *vc4_hdmi, u32 cntrl1) } } -static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv) +static irqreturn_t vc4_cec_irq_handler_tx_bare_locked(struct vc4_hdmi *vc4_hdmi) { - struct vc4_hdmi *vc4_hdmi = priv; u32 cntrl1; + lockdep_assert_held(&vc4_hdmi->hw_lock); + cntrl1 = HDMI_READ(HDMI_CEC_CNTRL_1); vc4_hdmi->cec_tx_ok = cntrl1 & VC4_HDMI_CEC_TX_STATUS_GOOD; cntrl1 &= ~VC4_HDMI_CEC_START_XMIT_BEGIN; @@ -1703,11 +1917,24 @@ static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv) return IRQ_WAKE_THREAD; } -static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv) +static irqreturn_t vc4_cec_irq_handler_tx_bare(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; + irqreturn_t ret; + + spin_lock(&vc4_hdmi->hw_lock); + ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi); + spin_unlock(&vc4_hdmi->hw_lock); + + return ret; +} + +static irqreturn_t vc4_cec_irq_handler_rx_bare_locked(struct vc4_hdmi *vc4_hdmi) +{ u32 cntrl1; + lockdep_assert_held(&vc4_hdmi->hw_lock); + vc4_hdmi->cec_rx_msg.len = 0; cntrl1 = 
HDMI_READ(HDMI_CEC_CNTRL_1); vc4_cec_read_msg(vc4_hdmi, cntrl1); @@ -1720,6 +1947,18 @@ static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv) return IRQ_WAKE_THREAD; } +static irqreturn_t vc4_cec_irq_handler_rx_bare(int irq, void *priv) +{ + struct vc4_hdmi *vc4_hdmi = priv; + irqreturn_t ret; + + spin_lock(&vc4_hdmi->hw_lock); + ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi); + spin_unlock(&vc4_hdmi->hw_lock); + + return ret; +} + static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) { struct vc4_hdmi *vc4_hdmi = priv; @@ -1730,69 +1969,142 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv) if (!(stat & VC4_HDMI_CPU_CEC)) return IRQ_NONE; + spin_lock(&vc4_hdmi->hw_lock); cntrl5 = HDMI_READ(HDMI_CEC_CNTRL_5); vc4_hdmi->cec_irq_was_rx = cntrl5 & VC4_HDMI_CEC_RX_CEC_INT; if (vc4_hdmi->cec_irq_was_rx) - ret = vc4_cec_irq_handler_rx_bare(irq, priv); + ret = vc4_cec_irq_handler_rx_bare_locked(vc4_hdmi); else - ret = vc4_cec_irq_handler_tx_bare(irq, priv); + ret = vc4_cec_irq_handler_tx_bare_locked(vc4_hdmi); HDMI_WRITE(HDMI_CEC_CPU_CLEAR, VC4_HDMI_CPU_CEC); + spin_unlock(&vc4_hdmi->hw_lock); + return ret; } -static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) +static int vc4_hdmi_cec_enable(struct cec_adapter *adap) { struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); /* clock period in microseconds */ const u32 usecs = 1000000 / CEC_CLOCK_FREQ; - u32 val = HDMI_READ(HDMI_CEC_CNTRL_5); + unsigned long flags; + u32 val; + int ret; + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. 
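Reviewer note: in the vc4_hdmi_cec_enable() body just below, expressions such as (4700 / usecs) convert CEC protocol timeouts given in microseconds into ticks of the CEC clock. A standalone sketch of that arithmetic; the 40 kHz CEC_CLOCK_FREQ is the value the driver uses at the time of writing and should be treated as an assumption here:

#include <stdio.h>

#define CEC_CLOCK_FREQ 40000	/* assumed 40 kHz CEC tick, per the driver */

int main(void)
{
	const unsigned int usecs = 1000000 / CEC_CLOCK_FREQ;	/* 25 us per tick */
	const unsigned int timeout_us[] = { 400, 600, 800, 1300, 1500, 1700,
					    2050, 2400, 2750, 3500, 3600,
					    3900, 4300, 4500, 4700 };
	unsigned int i;

	for (i = 0; i < sizeof(timeout_us) / sizeof(timeout_us[0]); i++)
		printf("%4u us -> %3u ticks\n",
		       timeout_us[i], timeout_us[i] / usecs);

	return 0;	/* e.g. 4700 us -> 188 ticks, 400 us -> 16 ticks */
}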
+ */ + + ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev); + if (ret) + return ret; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + + val = HDMI_READ(HDMI_CEC_CNTRL_5); val &= ~(VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET | VC4_HDMI_CEC_CNT_TO_4700_US_MASK | VC4_HDMI_CEC_CNT_TO_4500_US_MASK); val |= ((4700 / usecs) << VC4_HDMI_CEC_CNT_TO_4700_US_SHIFT) | ((4500 / usecs) << VC4_HDMI_CEC_CNT_TO_4500_US_SHIFT); - if (enable) { - HDMI_WRITE(HDMI_CEC_CNTRL_5, val | - VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); - HDMI_WRITE(HDMI_CEC_CNTRL_5, val); - HDMI_WRITE(HDMI_CEC_CNTRL_2, - ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | - ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | - ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | - ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | - ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); - HDMI_WRITE(HDMI_CEC_CNTRL_3, - ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | - ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | - ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | - ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); - HDMI_WRITE(HDMI_CEC_CNTRL_4, - ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | - ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | - ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | - ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); - - if (!vc4_hdmi->variant->external_irq_controller) - HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); - } else { - if (!vc4_hdmi->variant->external_irq_controller) - HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); - HDMI_WRITE(HDMI_CEC_CNTRL_5, val | - VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); - } + HDMI_WRITE(HDMI_CEC_CNTRL_5, val | + VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); + HDMI_WRITE(HDMI_CEC_CNTRL_5, val); + HDMI_WRITE(HDMI_CEC_CNTRL_2, + ((1500 / usecs) << VC4_HDMI_CEC_CNT_TO_1500_US_SHIFT) | + ((1300 / usecs) << VC4_HDMI_CEC_CNT_TO_1300_US_SHIFT) | + ((800 / usecs) << VC4_HDMI_CEC_CNT_TO_800_US_SHIFT) | + ((600 / usecs) << VC4_HDMI_CEC_CNT_TO_600_US_SHIFT) | + ((400 / usecs) << VC4_HDMI_CEC_CNT_TO_400_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_3, + ((2750 / usecs) << VC4_HDMI_CEC_CNT_TO_2750_US_SHIFT) | + ((2400 / usecs) << VC4_HDMI_CEC_CNT_TO_2400_US_SHIFT) | + ((2050 / usecs) << VC4_HDMI_CEC_CNT_TO_2050_US_SHIFT) | + ((1700 / usecs) << VC4_HDMI_CEC_CNT_TO_1700_US_SHIFT)); + HDMI_WRITE(HDMI_CEC_CNTRL_4, + ((4300 / usecs) << VC4_HDMI_CEC_CNT_TO_4300_US_SHIFT) | + ((3900 / usecs) << VC4_HDMI_CEC_CNT_TO_3900_US_SHIFT) | + ((3600 / usecs) << VC4_HDMI_CEC_CNT_TO_3600_US_SHIFT) | + ((3500 / usecs) << VC4_HDMI_CEC_CNT_TO_3500_US_SHIFT)); + + if (!vc4_hdmi->variant->external_irq_controller) + HDMI_WRITE(HDMI_CEC_CPU_MASK_CLEAR, VC4_HDMI_CPU_CEC); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + return 0; } +static int vc4_hdmi_cec_disable(struct cec_adapter *adap) +{ + struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + unsigned long flags; + + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. 
+ */ + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + + if (!vc4_hdmi->variant->external_irq_controller) + HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, VC4_HDMI_CPU_CEC); + + HDMI_WRITE(HDMI_CEC_CNTRL_5, HDMI_READ(HDMI_CEC_CNTRL_5) | + VC4_HDMI_CEC_TX_SW_RESET | VC4_HDMI_CEC_RX_SW_RESET); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + + pm_runtime_put(&vc4_hdmi->pdev->dev); + + return 0; +} + +static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) +{ + if (enable) + return vc4_hdmi_cec_enable(adap); + else + return vc4_hdmi_cec_disable(adap); +} + static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) { struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); + unsigned long flags; + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. + */ + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_CEC_CNTRL_1, (HDMI_READ(HDMI_CEC_CNTRL_1) & ~VC4_HDMI_CEC_ADDR_MASK) | (log_addr & 0xf) << VC4_HDMI_CEC_ADDR_SHIFT); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + return 0; } @@ -1801,14 +2113,28 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, { struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap); struct drm_device *dev = vc4_hdmi->connector.dev; + unsigned long flags; u32 val; unsigned int i; + /* + * NOTE: This function should really take vc4_hdmi->mutex, but doing so + * results in a reentrancy since cec_s_phys_addr_from_edid() called in + * .detect or .get_modes might call .adap_enable, which leads to this + * function being called with that mutex held. + * + * Concurrency is not an issue for the moment since we don't share any + * state with KMS, so we can ignore the lock for now, but we need to + * keep it in mind if we were to change that assumption. 
+ */ + if (msg->len > 16) { drm_err(dev, "Attempting to transmit too much data (%d)\n", msg->len); return -ENOMEM; } + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + for (i = 0; i < msg->len; i += 4) HDMI_WRITE(HDMI_CEC_TX_DATA_1 + (i >> 2), (msg->msg[i]) | @@ -1824,6 +2150,9 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, val |= VC4_HDMI_CEC_START_XMIT_BEGIN; HDMI_WRITE(HDMI_CEC_CNTRL_1, val); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); + return 0; } @@ -1838,6 +2167,7 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) struct cec_connector_info conn_info; struct platform_device *pdev = vc4_hdmi->pdev; struct device *dev = &pdev->dev; + unsigned long flags; u32 value; int ret; @@ -1857,10 +2187,12 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector); cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); value = HDMI_READ(HDMI_CEC_CNTRL_1); /* Set the logical address to Unregistered */ value |= VC4_HDMI_CEC_ADDR_MASK; HDMI_WRITE(HDMI_CEC_CNTRL_1, value); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); vc4_hdmi_cec_update_clk_div(vc4_hdmi); @@ -1879,7 +2211,9 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi) if (ret) goto err_remove_cec_rx_handler; } else { + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_CEC_CPU_MASK_SET, 0xffffffff); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); ret = request_threaded_irq(platform_get_irq(pdev, 0), vc4_cec_irq_handler, @@ -2115,8 +2449,7 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) return 0; } -#ifdef CONFIG_PM -static int vc4_hdmi_runtime_suspend(struct device *dev) +static int __maybe_unused vc4_hdmi_runtime_suspend(struct device *dev) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); @@ -2136,7 +2469,6 @@ static int vc4_hdmi_runtime_resume(struct device *dev) return 0; } -#endif static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) { @@ -2151,6 +2483,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi = devm_kzalloc(dev, sizeof(*vc4_hdmi), GFP_KERNEL); if (!vc4_hdmi) return -ENOMEM; + mutex_init(&vc4_hdmi->mutex); + spin_lock_init(&vc4_hdmi->hw_lock); INIT_DELAYED_WORK(&vc4_hdmi->scrambling_work, vc4_hdmi_scrambling_wq); dev_set_drvdata(dev, vc4_hdmi); @@ -2164,6 +2498,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi->pdev = pdev; vc4_hdmi->variant = variant; + /* + * Since we don't know the state of the controller and its + * display (if any), let's assume it's always enabled. + * vc4_hdmi_disable_scrambling() will thus run at boot, make + * sure it's disabled, and avoid any inconsistency. + */ + vc4_hdmi->scdc_enabled = true; + ret = variant->init_resources(vc4_hdmi); if (ret) return ret; @@ -2201,6 +2543,31 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi->disable_4kp60 = true; } + /* + * If we boot without any cable connected to the HDMI connector, + * the firmware will skip the HSM initialization and leave it + * with a rate of 0, resulting in a bus lockup when we're + * accessing the registers even if it's enabled. + * + * Let's put a sensible default at runtime_resume so that we + * don't end up in this situation. 
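Reviewer note: the bind-path hunks that follow pair a manual call to the runtime-resume hook with pm_runtime_get_noresume()/pm_runtime_set_active()/pm_runtime_enable(), then drop the reference with pm_runtime_put_sync() once probing is done. A minimal sketch of that idiom, with a hypothetical demo_ prefix standing in for the driver's functions:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int demo_runtime_resume(struct device *dev)
{
	/* clock and register bring-up would live here */
	return 0;
}

static int demo_bind(struct device *dev)
{
	int ret;

	ret = demo_runtime_resume(dev);	/* power up by hand, once */
	if (ret)
		return ret;

	pm_runtime_get_noresume(dev);	/* hold a reference, no resume call */
	pm_runtime_set_active(dev);	/* tell the PM core we are already on */
	pm_runtime_enable(dev);

	/* ... reset hook, CEC init, anything needing live registers ... */

	pm_runtime_put_sync(dev);	/* balance the probe-time reference */
	return 0;
}

This matters here because the new WARN_ON(!pm_runtime_active(...)) in the register accessors would otherwise trip during bind, before the PM core knows the device is powered.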
+ */ + ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ); + if (ret) + goto err_put_ddc; + + /* + * We need to have the device powered up at this point to call + * our reset hook and for the CEC init. + */ + ret = vc4_hdmi_runtime_resume(dev); + if (ret) + goto err_put_ddc; + + pm_runtime_get_noresume(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + if (vc4_hdmi->variant->reset) vc4_hdmi->variant->reset(vc4_hdmi); @@ -2212,8 +2579,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); } - pm_runtime_enable(dev); - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &vc4_hdmi_encoder_helper_funcs); @@ -2237,6 +2602,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi_debugfs_regs, vc4_hdmi); + pm_runtime_put_sync(dev); + return 0; err_free_cec: @@ -2247,6 +2614,7 @@ err_destroy_conn: vc4_hdmi_connector_destroy(&vc4_hdmi->connector); err_destroy_encoder: drm_encoder_cleanup(encoder); + pm_runtime_put_sync(dev); pm_runtime_disable(dev); err_put_ddc: put_device(&vc4_hdmi->ddc->dev); @@ -2333,7 +2701,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { .encoder_type = VC4_ENCODER_TYPE_HDMI0, .debugfs_name = "hdmi0_regs", .card_name = "vc4-hdmi-0", - .max_pixel_clock = HDMI_14_MAX_TMDS_CLK, + .max_pixel_clock = 600000000, .registers = vc5_hdmi_hdmi0_fields, .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields), .phy_lane_mapping = { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 33e9f665ab8e..36c0b082a43b 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -178,6 +178,43 @@ struct vc4_hdmi { struct debugfs_regset32 hdmi_regset; struct debugfs_regset32 hd_regset; + + /** + * @hw_lock: Spinlock protecting device register access. + */ + spinlock_t hw_lock; + + /** + * @mutex: Mutex protecting the driver access across multiple + * frameworks (KMS, ALSA). + * + * NOTE: While supported, CEC has been left out since + * cec_s_phys_addr_from_edid() might call .adap_enable and lead to a + * reentrancy issue between .get_modes (or .detect) and .adap_enable. + * Since we don't share any state between the CEC hooks and KMS', it's + * not a big deal. The only trouble might come from updating the CEC + * clock divider which might be affected by a modeset, but CEC should + * be resilient to that. + */ + struct mutex mutex; + + /** + * @saved_adjusted_mode: Copy of @drm_crtc_state.adjusted_mode + * for use by ALSA hooks and interrupt handlers. Protected by @mutex. + */ + struct drm_display_mode saved_adjusted_mode; + + /** + * @output_enabled: Is the HDMI controller currently active? + * Protected by @mutex. + */ + bool output_enabled; + + /** + * @scdc_enabled: Is the HDMI controller currently running with + * the scrambler on? Protected by @mutex. + */ + bool scdc_enabled; }; static inline struct vc4_hdmi * diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c index 36535480f8e2..62148f0dc284 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c @@ -130,31 +130,49 @@ void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct vc4_hdmi_connector_state *conn_state) { + unsigned long flags; + /* PHY should be in reset, like * vc4_hdmi_encoder_disable() does. 
*/ + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0xf << 16); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_CTL_0, HDMI_READ(HDMI_TX_PHY_CTL_0) & ~VC4_HDMI_TX_PHY_RNG_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_CTL_0, HDMI_READ(HDMI_TX_PHY_CTL_0) | VC4_HDMI_TX_PHY_RNG_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } static unsigned long long @@ -336,6 +354,8 @@ phy_get_channel_settings(enum vc4_hdmi_phy_channel chan, static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi) { + lockdep_assert_held(&vc4_hdmi->hw_lock); + HDMI_WRITE(HDMI_TX_PHY_RESET_CTL, 0x0f); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10)); } @@ -348,10 +368,13 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, unsigned long long pixel_freq = conn_state->pixel_rate; unsigned long long vco_freq; unsigned char word_sel; + unsigned long flags; u8 vco_sel, vco_div; vco_freq = phy_get_vco_freq(pixel_freq, &vco_sel, &vco_div); + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); + vc5_hdmi_reset_phy(vc4_hdmi); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, @@ -501,23 +524,37 @@ void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, HDMI_READ(HDMI_TX_PHY_RESET_CTL) | VC4_HDMI_TX_PHY_RESET_CTL_PLL_RESETB | VC4_HDMI_TX_PHY_RESET_CTL_PLLDIV_RESETB); + + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); vc5_hdmi_reset_phy(vc4_hdmi); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) & ~VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi) { + unsigned long flags; + + spin_lock_irqsave(&vc4_hdmi->hw_lock, flags); HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, HDMI_READ(HDMI_TX_PHY_POWERDOWN_CTL) | VC4_HDMI_TX_PHY_POWERDOWN_CTL_RNDGEN_PWRDN); + spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags); } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h index 19d2fdc446bc..fc971506bd4f 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -1,6 +1,8 @@ #ifndef _VC4_HDMI_REGS_H_ #define _VC4_HDMI_REGS_H_ +#include <linux/pm_runtime.h> + #include "vc4_hdmi.h" #define VC4_HDMI_PACKET_STRIDE 0x24 @@ -412,6 +414,8 @@ static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi, const struct vc4_hdmi_variant *variant = hdmi->variant; void __iomem *base; + WARN_ON(!pm_runtime_active(&hdmi->pdev->dev)); + if (reg >= variant->num_registers) { dev_warn(&hdmi->pdev->dev, "Invalid register ID %u\n", reg); @@ -438,6 +442,10 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi, const struct vc4_hdmi_variant *variant = 
hdmi->variant; void __iomem *base; + lockdep_assert_held(&hdmi->hw_lock); + + WARN_ON(!pm_runtime_active(&hdmi->pdev->dev)); + if (reg >= variant->num_registers) { dev_warn(&hdmi->pdev->dev, "Invalid register ID %u\n", reg); diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index c239045e05d6..604933e20e6a 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -365,17 +365,16 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + unsigned long flags; if (crtc->state->event) { - unsigned long flags; - crtc->state->event->pipe = drm_crtc_index(crtc); WARN_ON(drm_crtc_vblank_get(crtc) != 0); spin_lock_irqsave(&dev->event_lock, flags); - if (!vc4_state->feed_txp || vc4_state->txp_armed) { + if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) { vc4_crtc->event = crtc->state->event; crtc->state->event = NULL; } @@ -388,6 +387,22 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc) HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel), vc4_state->mm.start); } + + spin_lock_irqsave(&vc4_crtc->irq_lock, flags); + vc4_crtc->current_dlist = vc4_state->mm.start; + spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags); +} + +void vc4_hvs_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + unsigned long flags; + + spin_lock_irqsave(&vc4_crtc->irq_lock, flags); + vc4_crtc->current_hvs_channel = vc4_state->assigned_channel; + spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags); } void vc4_hvs_atomic_enable(struct drm_crtc *crtc, @@ -395,10 +410,9 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state); struct drm_display_mode *mode = &crtc->state->adjusted_mode; - bool oneshot = vc4_state->feed_txp; + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + bool oneshot = vc4_crtc->feeds_txp; vc4_hvs_update_dlist(crtc); vc4_hvs_init_channel(vc4, crtc, mode, oneshot); diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index f0b3e4cf5bce..24de29bc1cda 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -39,9 +39,11 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv) struct vc4_hvs_state { struct drm_private_state base; + unsigned long core_clock_rate; struct { unsigned in_use: 1; + unsigned long fifo_load; struct drm_crtc_commit *pending_commit; } fifo_state[HVS_NUM_CHANNELS]; }; @@ -233,6 +235,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, unsigned int i; for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); u32 dispctrl; u32 dsp3_mux; @@ -253,7 +256,7 @@ static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 * route. 
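Reviewer note: vc4_hvs_atomic_begin() above publishes the CRTC's channel assignment to the vblank interrupt handler under the new irq_lock, and vc4_hvs_update_dlist() does the same for the display-list start. A reduced sketch of that producer/consumer pattern (demo_ names are illustrative, and the two publish sites are merged for brevity):

#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_crtc {
	spinlock_t irq_lock;	/* shared with the vblank IRQ handler */
	unsigned int current_hvs_channel;
	u32 current_dlist;
};

/* commit path: publish the new scanout parameters */
static void demo_atomic_begin(struct demo_crtc *c, unsigned int channel,
			      u32 dlist_start)
{
	unsigned long flags;

	spin_lock_irqsave(&c->irq_lock, flags);
	c->current_hvs_channel = channel;
	c->current_dlist = dlist_start;
	spin_unlock_irqrestore(&c->irq_lock, flags);
}

/* vblank IRQ: consume them without racing the commit thread */
static void demo_vblank_irq(struct demo_crtc *c)
{
	spin_lock(&c->irq_lock);
	/* ... reprogram SCALER_DISPLISTX(c->current_hvs_channel) ... */
	spin_unlock(&c->irq_lock);
}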
*/ - if (vc4_state->feed_txp) + if (vc4_crtc->feeds_txp) dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); else dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); @@ -337,12 +340,21 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; - struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; + struct vc4_hvs_state *new_hvs_state; struct drm_crtc *crtc; struct vc4_hvs_state *old_hvs_state; + unsigned int channel; int i; + old_hvs_state = vc4_hvs_get_old_global_state(state); + if (WARN_ON(IS_ERR(old_hvs_state))) + return; + + new_hvs_state = vc4_hvs_get_new_global_state(state); + if (WARN_ON(IS_ERR(new_hvs_state))) + return; + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { struct vc4_crtc_state *vc4_crtc_state; @@ -353,30 +365,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); } - if (vc4->hvs->hvs5) - clk_set_min_rate(hvs->core_clk, 500000000); - - old_hvs_state = vc4_hvs_get_old_global_state(state); - if (!old_hvs_state) - return; - - for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { - struct vc4_crtc_state *vc4_crtc_state = - to_vc4_crtc_state(old_crtc_state); - unsigned int channel = vc4_crtc_state->assigned_channel; + for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) { + struct drm_crtc_commit *commit; int ret; - if (channel == VC4_HVS_CHANNEL_DISABLED) + if (!old_hvs_state->fifo_state[channel].in_use) continue; - if (!old_hvs_state->fifo_state[channel].in_use) + commit = old_hvs_state->fifo_state[channel].pending_commit; + if (!commit) continue; - ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit); + ret = drm_crtc_commit_wait(commit); if (ret) drm_err(dev, "Timed out waiting for commit\n"); + + drm_crtc_commit_put(commit); + old_hvs_state->fifo_state[channel].pending_commit = NULL; } + if (vc4->hvs->hvs5) { + unsigned long core_rate = max_t(unsigned long, + 500000000, + new_hvs_state->core_clock_rate); + + clk_set_min_rate(hvs->core_clk, core_rate); + } drm_atomic_helper_commit_modeset_disables(dev, state); vc4_ctm_commit(vc4, state); @@ -398,8 +412,12 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - if (vc4->hvs->hvs5) - clk_set_min_rate(hvs->core_clk, 0); + if (vc4->hvs->hvs5) { + drm_dbg(dev, "Running the core clock at %lu Hz\n", + new_hvs_state->core_clock_rate); + + clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate); + } } static int vc4_atomic_commit_setup(struct drm_atomic_state *state) @@ -410,8 +428,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state) unsigned int i; hvs_state = vc4_hvs_get_new_global_state(state); - if (!hvs_state) - return -EINVAL; + if (WARN_ON(IS_ERR(hvs_state))) + return PTR_ERR(hvs_state); for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct vc4_crtc_state *vc4_crtc_state = @@ -551,9 +569,6 @@ static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state) struct drm_plane *plane; int i; - if (!vc4->load_tracker_available) - return 0; - priv_state = drm_atomic_get_private_obj_state(state, &vc4->load_tracker); if (IS_ERR(priv_state)) @@ -628,9 +643,6 @@ static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (!vc4->load_tracker_available) - return; - 
drm_atomic_private_obj_fini(&vc4->load_tracker); } @@ -638,9 +650,6 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4) { struct vc4_load_tracker_state *load_state; - if (!vc4->load_tracker_available) - return 0; - load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); if (!load_state) return -ENOMEM; @@ -665,17 +674,13 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj) __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); - for (i = 0; i < HVS_NUM_CHANNELS; i++) { state->fifo_state[i].in_use = old_state->fifo_state[i].in_use; - - if (!old_state->fifo_state[i].pending_commit) - continue; - - state->fifo_state[i].pending_commit = - drm_crtc_commit_get(old_state->fifo_state[i].pending_commit); + state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load; } + state->core_clock_rate = old_state->core_clock_rate; + return &state->base; } @@ -762,8 +767,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, unsigned int i; hvs_new_state = vc4_hvs_get_global_state(state); - if (!hvs_new_state) - return -EINVAL; + if (IS_ERR(hvs_new_state)) + return PTR_ERR(hvs_new_state); for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++) if (!hvs_new_state->fifo_state[i].in_use) @@ -831,6 +836,76 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, } static int +vc4_core_clock_atomic_check(struct drm_atomic_state *state) +{ + struct vc4_dev *vc4 = to_vc4_dev(state->dev); + struct drm_private_state *priv_state; + struct vc4_hvs_state *hvs_new_state; + struct vc4_load_tracker_state *load_state; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc *crtc; + unsigned int num_outputs; + unsigned long pixel_rate; + unsigned long cob_rate; + unsigned int i; + + priv_state = drm_atomic_get_private_obj_state(state, + &vc4->load_tracker); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + + load_state = to_vc4_load_tracker_state(priv_state); + + hvs_new_state = vc4_hvs_get_global_state(state); + if (IS_ERR(hvs_new_state)) + return PTR_ERR(hvs_new_state); + + for_each_oldnew_crtc_in_state(state, crtc, + old_crtc_state, + new_crtc_state, + i) { + if (old_crtc_state->active) { + struct vc4_crtc_state *old_vc4_state = + to_vc4_crtc_state(old_crtc_state); + unsigned int channel = old_vc4_state->assigned_channel; + + hvs_new_state->fifo_state[channel].fifo_load = 0; + } + + if (new_crtc_state->active) { + struct vc4_crtc_state *new_vc4_state = + to_vc4_crtc_state(new_crtc_state); + unsigned int channel = new_vc4_state->assigned_channel; + + hvs_new_state->fifo_state[channel].fifo_load = + new_vc4_state->hvs_load; + } + } + + cob_rate = 0; + num_outputs = 0; + for (i = 0; i < HVS_NUM_CHANNELS; i++) { + if (!hvs_new_state->fifo_state[i].in_use) + continue; + + num_outputs++; + cob_rate += hvs_new_state->fifo_state[i].fifo_load; + } + + pixel_rate = load_state->hvs_load; + if (num_outputs > 1) { + pixel_rate = (pixel_rate * 40) / 100; + } else { + pixel_rate = (pixel_rate * 60) / 100; + } + + hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate); + + return 0; +} + + +static int vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { int ret; @@ -847,7 +922,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) if (ret) return ret; - return vc4_load_tracker_atomic_check(state); + ret = vc4_load_tracker_atomic_check(state); + if (ret) + return ret; + + return vc4_core_clock_atomic_check(state); } static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = { @@ -868,9 
+947,12 @@ int vc4_kms_load(struct drm_device *dev) "brcm,bcm2711-vc5"); int ret; + /* + * The limits enforced by the load tracker aren't relevant for + * the BCM2711, but the load tracker computations are used for + * the core clock rate calculation. + */ if (!is_vc5) { - vc4->load_tracker_available = true; - /* Start with the load tracker enabled. Can be * disabled through the debugfs load_tracker file. */ diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 19161b6ab27f..920a9eefe426 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -33,6 +33,7 @@ static const struct hvs_format { u32 hvs; /* HVS_FORMAT_* */ u32 pixel_order; u32 pixel_order_hvs5; + bool hvs5_only; } hvs_formats[] = { { .drm = DRM_FORMAT_XRGB8888, @@ -128,6 +129,12 @@ static const struct hvs_format { .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE, .pixel_order = HVS_PIXEL_ORDER_XYCRCB, }, + { + .drm = DRM_FORMAT_P030, + .hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT, + .pixel_order = HVS_PIXEL_ORDER_XYCBCR, + .hvs5_only = true, + }, }; static const struct hvs_format *vc4_get_hvs_format(u32 drm_format) @@ -529,11 +536,6 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) struct vc4_plane_state *vc4_state; struct drm_crtc_state *crtc_state; unsigned int vscale_factor; - struct vc4_dev *vc4; - - vc4 = to_vc4_dev(state->plane->dev); - if (!vc4->load_tracker_available) - return; vc4_state = to_vc4_plane_state(state); crtc_state = drm_atomic_get_existing_crtc_state(state->state, @@ -621,6 +623,51 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) return 0; } +/* + * The colorspace conversion matrices are held in 3 entries in the dlist. + * Create an array of them, with entries for each full and limited mode, and + * each supported colorspace. + */ +static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = { + { + /* Limited range */ + { + /* BT601 */ + SCALER_CSC0_ITR_R_601_5, + SCALER_CSC1_ITR_R_601_5, + SCALER_CSC2_ITR_R_601_5, + }, { + /* BT709 */ + SCALER_CSC0_ITR_R_709_3, + SCALER_CSC1_ITR_R_709_3, + SCALER_CSC2_ITR_R_709_3, + }, { + /* BT2020 */ + SCALER_CSC0_ITR_R_2020, + SCALER_CSC1_ITR_R_2020, + SCALER_CSC2_ITR_R_2020, + } + }, { + /* Full range */ + { + /* JFIF */ + SCALER_CSC0_JPEG_JFIF, + SCALER_CSC1_JPEG_JFIF, + SCALER_CSC2_JPEG_JFIF, + }, { + /* BT709 */ + SCALER_CSC0_ITR_R_709_3_FR, + SCALER_CSC1_ITR_R_709_3_FR, + SCALER_CSC2_ITR_R_709_3_FR, + }, { + /* BT2020 */ + SCALER_CSC0_ITR_R_2020_FR, + SCALER_CSC1_ITR_R_2020_FR, + SCALER_CSC2_ITR_R_2020_FR, + } + } +}; + /* Writes out a full display list for an active plane to the plane's * private dlist state. 
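Reviewer note: vc4_core_clock_atomic_check() in the vc4_kms.c hunks above reduces to simple arithmetic: the tracked HVS load is scaled to 40% when more than one output is active (60% with a single one), and the result is capped from below by the summed COB FIFO loads. A standalone sketch with made-up rates, only meant to exercise the formula:

#include <stdio.h>

static unsigned long long core_clock_rate(unsigned long long hvs_load,
					  unsigned long long cob_rate,
					  unsigned int num_outputs)
{
	/* 40% of the raw load with several outputs, 60% with a single one */
	unsigned long long pixel_rate = num_outputs > 1 ?
					hvs_load * 40 / 100 :
					hvs_load * 60 / 100;

	return cob_rate > pixel_rate ? cob_rate : pixel_rate;
}

int main(void)
{
	/* hypothetical figures */
	printf("one output:  %llu Hz\n",
	       core_clock_rate(800000000ULL, 400000000ULL, 1));	/* 480 MHz */
	printf("two outputs: %llu Hz\n",
	       core_clock_rate(800000000ULL, 400000000ULL, 2));	/* 400 MHz */
	return 0;
}

The commit-tail hunk then requests at least 500 MHz for the duration of the commit itself before settling on this computed rate.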
*/ @@ -767,47 +814,90 @@ static int vc4_plane_mode_set(struct drm_plane *plane, case DRM_FORMAT_MOD_BROADCOM_SAND128: case DRM_FORMAT_MOD_BROADCOM_SAND256: { uint32_t param = fourcc_mod_broadcom_param(fb->modifier); - u32 tile_w, tile, x_off, pix_per_tile; - - hvs_format = HVS_PIXEL_FORMAT_H264; - - switch (base_format_mod) { - case DRM_FORMAT_MOD_BROADCOM_SAND64: - tiling = SCALER_CTL0_TILING_64B; - tile_w = 64; - break; - case DRM_FORMAT_MOD_BROADCOM_SAND128: - tiling = SCALER_CTL0_TILING_128B; - tile_w = 128; - break; - case DRM_FORMAT_MOD_BROADCOM_SAND256: - tiling = SCALER_CTL0_TILING_256B_OR_T; - tile_w = 256; - break; - default: - break; - } if (param > SCALER_TILE_HEIGHT_MASK) { - DRM_DEBUG_KMS("SAND height too large (%d)\n", param); + DRM_DEBUG_KMS("SAND height too large (%d)\n", + param); return -EINVAL; } - pix_per_tile = tile_w / fb->format->cpp[0]; - tile = vc4_state->src_x / pix_per_tile; - x_off = vc4_state->src_x % pix_per_tile; + if (fb->format->format == DRM_FORMAT_P030) { + hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT; + tiling = SCALER_CTL0_TILING_128B; + } else { + hvs_format = HVS_PIXEL_FORMAT_H264; + + switch (base_format_mod) { + case DRM_FORMAT_MOD_BROADCOM_SAND64: + tiling = SCALER_CTL0_TILING_64B; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND128: + tiling = SCALER_CTL0_TILING_128B; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND256: + tiling = SCALER_CTL0_TILING_256B_OR_T; + break; + default: + return -EINVAL; + } + } /* Adjust the base pointer to the first pixel to be scanned * out. + * + * For P030, y_ptr [31:4] is the 128bit word for the start pixel + * y_ptr [3:0] is the pixel (0-11) contained within that 128bit + * word that should be taken as the first pixel. + * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the + * element within the 128bit word, eg for pixel 3 the value + * should be 6. */ for (i = 0; i < num_planes; i++) { + u32 tile_w, tile, x_off, pix_per_tile; + + if (fb->format->format == DRM_FORMAT_P030) { + /* + * Spec says: bits [31:4] of the given address + * should point to the 128-bit word containing + * the desired starting pixel, and bits[3:0] + * should be between 0 and 11, indicating which + * of the 12-pixels in that 128-bit word is the + * first pixel to be used + */ + u32 remaining_pixels = vc4_state->src_x % 96; + u32 aligned = remaining_pixels / 12; + u32 last_bits = remaining_pixels % 12; + + x_off = aligned * 16 + last_bits; + tile_w = 128; + pix_per_tile = 96; + } else { + switch (base_format_mod) { + case DRM_FORMAT_MOD_BROADCOM_SAND64: + tile_w = 64; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND128: + tile_w = 128; + break; + case DRM_FORMAT_MOD_BROADCOM_SAND256: + tile_w = 256; + break; + default: + return -EINVAL; + } + pix_per_tile = tile_w / fb->format->cpp[0]; + x_off = (vc4_state->src_x % pix_per_tile) / + (i ? h_subsample : 1) * + fb->format->cpp[i]; + } + + tile = vc4_state->src_x / pix_per_tile; + vc4_state->offsets[i] += param * tile_w * tile; vc4_state->offsets[i] += src_y / (i ? v_subsample : 1) * tile_w; - vc4_state->offsets[i] += x_off / - (i ? h_subsample : 1) * - fb->format->cpp[i]; + vc4_state->offsets[i] += x_off & ~(i ? 
1 : 0); } pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT); @@ -960,7 +1050,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, /* Pitch word 1/2 */ for (i = 1; i < num_planes; i++) { - if (hvs_format != HVS_PIXEL_FORMAT_H264) { + if (hvs_format != HVS_PIXEL_FORMAT_H264 && + hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) { vc4_dlist_write(vc4_state, VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH)); @@ -971,9 +1062,20 @@ static int vc4_plane_mode_set(struct drm_plane *plane, /* Colorspace conversion words */ if (vc4_state->is_yuv) { - vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5); - vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5); - vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); + enum drm_color_encoding color_encoding = state->color_encoding; + enum drm_color_range color_range = state->color_range; + const u32 *ccm; + + if (color_encoding >= DRM_COLOR_ENCODING_MAX) + color_encoding = DRM_COLOR_YCBCR_BT601; + if (color_range >= DRM_COLOR_RANGE_MAX) + color_range = DRM_COLOR_YCBCR_LIMITED_RANGE; + + ccm = colorspace_coeffs[color_range][color_encoding]; + + vc4_dlist_write(vc4_state, ccm[0]); + vc4_dlist_write(vc4_state, ccm[1]); + vc4_dlist_write(vc4_state, ccm[2]); } vc4_state->lbm_offset = 0; @@ -1320,6 +1422,13 @@ static bool vc4_format_mod_supported(struct drm_plane *plane, default: return false; } + case DRM_FORMAT_P030: + switch (fourcc_mod_broadcom_mod(modifier)) { + case DRM_FORMAT_MOD_BROADCOM_SAND128: + return true; + default: + return false; + } case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_RGBA1010102: @@ -1352,8 +1461,11 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, struct drm_plane *plane = NULL; struct vc4_plane *vc4_plane; u32 formats[ARRAY_SIZE(hvs_formats)]; + int num_formats = 0; int ret = 0; unsigned i; + bool hvs5 = of_device_is_compatible(dev->dev->of_node, + "brcm,bcm2711-vc5"); static const uint64_t modifiers[] = { DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED, DRM_FORMAT_MOD_BROADCOM_SAND128, @@ -1368,13 +1480,17 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, if (!vc4_plane) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) - formats[i] = hvs_formats[i].drm; + for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { + if (!hvs_formats[i].hvs5_only || hvs5) { + formats[num_formats] = hvs_formats[i].drm; + num_formats++; + } + } plane = &vc4_plane->base; ret = drm_universal_plane_init(dev, plane, 0, &vc4_plane_funcs, - formats, ARRAY_SIZE(formats), + formats, num_formats, modifiers, type, NULL); if (ret) return ERR_PTR(ret); @@ -1388,6 +1504,15 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y); + drm_plane_create_color_properties(plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709) | + BIT(DRM_COLOR_YCBCR_BT2020), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, + DRM_COLOR_YCBCR_LIMITED_RANGE); + return plane; } diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 489f921ef44d..7538b84a6dca 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -975,7 +975,10 @@ enum hvs_pixel_format { #define SCALER_CSC0_COEF_CR_OFS_SHIFT 0 #define SCALER_CSC0_ITR_R_601_5 0x00f00000 #define SCALER_CSC0_ITR_R_709_3 0x00f00000 +#define SCALER_CSC0_ITR_R_2020 0x00f00000 #define SCALER_CSC0_JPEG_JFIF 0x00000000 +#define SCALER_CSC0_ITR_R_709_3_FR 0x00000000 +#define SCALER_CSC0_ITR_R_2020_FR 0x00000000 /* S2.8 contribution of Cb to Green 
*/ #define SCALER_CSC1_COEF_CB_GRN_MASK VC4_MASK(31, 22) @@ -990,8 +993,11 @@ enum hvs_pixel_format { #define SCALER_CSC1_COEF_CR_BLU_MASK VC4_MASK(1, 0) #define SCALER_CSC1_COEF_CR_BLU_SHIFT 0 #define SCALER_CSC1_ITR_R_601_5 0xe73304a8 -#define SCALER_CSC1_ITR_R_709_3 0xf2b784a8 -#define SCALER_CSC1_JPEG_JFIF 0xea34a400 +#define SCALER_CSC1_ITR_R_709_3 0xf27784a8 +#define SCALER_CSC1_ITR_R_2020 0xf43594a8 +#define SCALER_CSC1_JPEG_JFIF 0xea349400 +#define SCALER_CSC1_ITR_R_709_3_FR 0xf4388400 +#define SCALER_CSC1_ITR_R_2020_FR 0xf5b6d400 /* S2.8 contribution of Cb to Red */ #define SCALER_CSC2_COEF_CB_RED_MASK VC4_MASK(29, 20) @@ -1002,9 +1008,12 @@ enum hvs_pixel_format { /* S2.8 contribution of Cb to Blue */ #define SCALER_CSC2_COEF_CB_BLU_MASK VC4_MASK(19, 10) #define SCALER_CSC2_COEF_CB_BLU_SHIFT 10 -#define SCALER_CSC2_ITR_R_601_5 0x00066204 -#define SCALER_CSC2_ITR_R_709_3 0x00072a1c -#define SCALER_CSC2_JPEG_JFIF 0x000599c5 +#define SCALER_CSC2_ITR_R_601_5 0x00066604 +#define SCALER_CSC2_ITR_R_709_3 0x00072e1d +#define SCALER_CSC2_ITR_R_2020 0x0006b624 +#define SCALER_CSC2_JPEG_JFIF 0x00059dc6 +#define SCALER_CSC2_ITR_R_709_3_FR 0x00064ddb +#define SCALER_CSC2_ITR_R_2020_FR 0x0005e5e2 #define SCALER_TPZ0_VERT_RECALC BIT(31) #define SCALER_TPZ0_SCALE_MASK VC4_MASK(28, 8) diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index 2fc7f4b5fa09..9809ca3e2945 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -391,7 +391,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc, { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); int ret; ret = vc4_hvs_atomic_check(crtc, state); @@ -399,7 +398,6 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc, return ret; crtc_state->no_vblank = true; - vc4_state->feed_txp = true; return 0; } @@ -437,6 +435,7 @@ static void vc4_txp_atomic_disable(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs vc4_txp_crtc_helper_funcs = { .atomic_check = vc4_txp_atomic_check, + .atomic_begin = vc4_hvs_atomic_begin, .atomic_flush = vc4_hvs_atomic_flush, .atomic_enable = vc4_txp_atomic_enable, .atomic_disable = vc4_txp_atomic_disable, @@ -482,6 +481,7 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data) vc4_crtc->pdev = pdev; vc4_crtc->data = &vc4_txp_crtc_data; + vc4_crtc->feeds_txp = true; txp->pdev = pdev; diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index a87eafa89e9f..c5e3e5457737 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -97,7 +97,7 @@ static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, siz obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) - return NULL; + return ERR_PTR(-ENOMEM); /* * vgem doesn't have any begin/end cpu access ioctls, therefore must use diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index d86e1ad4a972..5f25a8d15464 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -27,7 +27,6 @@ */ #include <linux/module.h> -#include <linux/console.h> #include <linux/pci.h> #include <linux/poll.h> #include <linux/wait.h> @@ -104,7 +103,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev) struct drm_device *dev; int ret; - if (vgacon_text_force() && virtio_gpu_modeset == -1) + if (drm_firmware_drivers_only() && virtio_gpu_modeset == -1) return -EINVAL; if 
(virtio_gpu_modeset == 0) @@ -157,36 +156,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev) schedule_work(&vgdev->config_changed_work); } -static __poll_t virtio_gpu_poll(struct file *filp, - struct poll_table_struct *wait) -{ - struct drm_file *drm_file = filp->private_data; - struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; - struct drm_device *dev = drm_file->minor->dev; - struct virtio_gpu_device *vgdev = dev->dev_private; - struct drm_pending_event *e = NULL; - __poll_t mask = 0; - - if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask) - return drm_poll(filp, wait); - - poll_wait(filp, &drm_file->event_wait, wait); - - if (!list_empty(&drm_file->event_list)) { - spin_lock_irq(&dev->event_lock); - e = list_first_entry(&drm_file->event_list, - struct drm_pending_event, link); - drm_file->event_space += e->event->length; - list_del(&e->link); - spin_unlock_irq(&dev->event_lock); - - kfree(e); - mask |= EPOLLIN | EPOLLRDNORM; - } - - return mask; -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -226,17 +195,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Alon Levy"); -static const struct file_operations virtio_gpu_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .poll = virtio_gpu_poll, - .read = drm_read, - .llseek = noop_llseek, - .mmap = drm_gem_mmap -}; +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index e0265fe74aa5..0a194aaad419 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver { spinlock_t lock; }; -#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000 struct virtio_gpu_fence_event { struct drm_pending_event base; struct drm_event event; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5618a1d5879c..c708bab555c6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev, if (!e) return -ENOMEM; - e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL; + e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED; e->event.length = sizeof(e->event); ret = drm_event_reserve_init(dev, file, &e->base, &e->event); @@ -774,7 +774,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, goto out_unlock; } - if ((vgdev->capset_id_mask & (1 << value)) == 0) { + if ((vgdev->capset_id_mask & (1ULL << value)) == 0) { ret = -EINVAL; goto out_unlock; } @@ -819,7 +819,7 @@ static int virtio_gpu_context_init_ioctl(struct drm_device *dev, if (vfpriv->ring_idx_mask) { valid_ring_mask = 0; for (i = 0; i < vfpriv->num_rings; i++) - valid_ring_mask |= 1 << i; + valid_ring_mask |= 1ULL << i; if (~valid_ring_mask & vfpriv->ring_idx_mask) { ret = -EINVAL; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index f648b0e24447..baef2c5f2aaf 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -79,10 +79,10 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) 
sg_free_table(shmem->pages); kfree(shmem->pages); shmem->pages = NULL; - drm_gem_shmem_unpin(&bo->base.base); + drm_gem_shmem_unpin(&bo->base); } - drm_gem_shmem_free_object(&bo->base.base); + drm_gem_shmem_free(&bo->base); } else if (virtio_gpu_is_vram(bo)) { struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); @@ -116,15 +116,14 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = { .free = virtio_gpu_free_object, .open = virtio_gpu_gem_object_open, .close = virtio_gpu_gem_object_close, - - .print_info = drm_gem_shmem_print_info, + .print_info = drm_gem_shmem_object_print_info, .export = virtgpu_gem_prime_export, - .pin = drm_gem_shmem_pin, - .unpin = drm_gem_shmem_unpin, - .get_sg_table = drm_gem_shmem_get_sg_table, - .vmap = drm_gem_shmem_vmap, - .vunmap = drm_gem_shmem_vunmap, - .mmap = drm_gem_shmem_mmap, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, }; bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo) @@ -140,7 +139,7 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); if (!shmem) - return NULL; + return ERR_PTR(-ENOMEM); dshmem = &shmem->base.base; dshmem->base.funcs = &virtio_gpu_shmem_funcs; @@ -157,7 +156,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, struct scatterlist *sg; int si, ret; - ret = drm_gem_shmem_pin(&bo->base.base); + ret = drm_gem_shmem_pin(&bo->base); if (ret < 0) return -EINVAL; @@ -167,9 +166,9 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, * dma-ops. This is discouraged for other drivers, but should be fine * since virtio_gpu doesn't support dma-buf import from other devices. */ - shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base); + shmem->pages = drm_gem_shmem_get_sg_table(&bo->base); if (!shmem->pages) { - drm_gem_shmem_unpin(&bo->base.base); + drm_gem_shmem_unpin(&bo->base); return -EINVAL; } @@ -277,6 +276,6 @@ err_put_objs: err_put_id: virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); err_free_gem: - drm_gem_shmem_free_object(&shmem_obj->base); + drm_gem_shmem_free(shmem_obj); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig index c9ce47c448e0..a4fabe208d9f 100644 --- a/drivers/gpu/drm/vmwgfx/Kconfig +++ b/drivers/gpu/drm/vmwgfx/Kconfig @@ -4,6 +4,7 @@ config DRM_VMWGFX depends on DRM && PCI && MMU depends on X86 || ARM64 select DRM_TTM + select DRM_TTM_HELPER select MAPPING_DIRTY_HELPERS # Only needed for the transitional use of drm_crtc_init - can be removed # again once vmwgfx sets up the primary plane itself. 
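Note on the virtgpu_ioctl.c hunks above: capset_id_mask and ring_idx_mask are 64-bit masks, but the old code built the test bit with a plain 1 << value. That shift is evaluated in 32-bit int, so any capset id or ring index of 32 or more is undefined behavior (and in practice a truncated mask), which is why the fix widens the constant to 1ULL. Below is a minimal standalone sketch of the pitfall; the mask value and bit index are made up for illustration and are not taken from the driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 1ULL << 40;	/* pretend bit 40 advertises a feature */
	unsigned int idx = 40;

	/*
	 * Broken form: (mask & (1 << idx)). The constant 1 is a 32-bit
	 * int, so the shift is evaluated in 32 bits; shifting by 40 is
	 * undefined behavior and in practice tests the wrong bit.
	 *
	 * Fixed form, as in the virtgpu change: widen before shifting so
	 * the whole expression is computed in 64 bits.
	 */
	if (mask & (1ULL << idx))
		printf("bit %u is set\n", idx);

	return 0;
}

The same reasoning applies to building valid_ring_mask: accumulating 1ULL << i keeps the loop correct for drivers exposing more than 31 rings.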
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index bc323f7d4032..e9d13f155d8c 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ +vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \ @@ -9,7 +9,8 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ - vmwgfx_devcaps.o ttm_object.o ttm_memory.o + vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \ + vmwgfx_gem.o vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h index 945c84b27e81..d90d940ad3f4 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_cmd.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h index 379ec15c7758..815d0ab0053f 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_devcaps.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 1998-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_devcaps.h -- @@ -347,6 +347,10 @@ typedef uint32 SVGA3dDevCapIndex; #define SVGA3D_DEVCAP_SM5 258 #define SVGA3D_DEVCAP_MULTISAMPLE_8X 259 +#define SVGA3D_DEVCAP_MAX_FORCED_SAMPLE_COUNT 260 + +#define SVGA3D_DEVCAP_GL43 261 + #define SVGA3D_DEVCAP_MAX 262 #define SVGA3D_DXFMT_SUPPORTED (1 << 0) diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h index 5af442dad542..925bf4b93f01 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. 
- * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_dx.h -- @@ -508,11 +508,11 @@ typedef struct SVGA3dCmdDXSetPredication { #pragma pack(pop) #pragma pack(push, 1) -typedef struct MKS3dDXSOState { +typedef struct SVGA3dDXSOState { uint32 offset; uint32 intOffset; - uint32 vertexCount; - uint32 dead; + uint32 dead1; + uint32 dead2; } SVGA3dDXSOState; #pragma pack(pop) diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h index 35494a728c7a..6103b41fe92b 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_limits.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_limits.h -- @@ -82,4 +82,6 @@ #define SVGA3D_MIN_SBX_DATA_SIZE (GBYTES_2_BYTES(1)) #define SVGA3D_MAX_SBX_DATA_SIZE (GBYTES_2_BYTES(4)) +#define SVGA3D_MIN_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(900)) +#define SVGA3D_MAX_SBX_DATA_SIZE_DVM (MBYTES_2_BYTES(910)) #endif diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h index 988d8509c472..b24b4f55c941 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_reg.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 1998-2015 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga3d_reg.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h index 70b88ee16cf6..e9219eb380a2 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_types.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2012-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* - **********************************************************/ + */ /* * svga3d_types.h -- @@ -370,7 +370,6 @@ typedef enum SVGA3dSurfaceFormat { #define SVGA3D_SURFACE_TRANSFER_FROM_BUFFER (CONST64U(1) << 30) #define SVGA3D_SURFACE_RESERVED1 (CONST64U(1) << 31) -#define SVGA3D_SURFACE_VADECODE SVGA3D_SURFACE_RESERVED1 #define SVGA3D_SURFACE_MULTISAMPLE (CONST64U(1) << 32) diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h index bf242c21f352..405f20fc26c2 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_escape.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2007,2020 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga_escape.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h index aec17c3c6c29..691f48f77e33 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_overlay.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 2007-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - **********************************************************/ + */ /* * svga_overlay.h -- diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h index b3602557de2e..acabdb550c10 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h @@ -1,6 +1,6 @@ -/********************************************************** +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* * Copyright 1998-2021 VMware, Inc. - * SPDX-License-Identifier: GPL-2.0 OR MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation @@ -22,7 +22,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* - **********************************************************/ + */ /* * svga_reg.h -- @@ -442,6 +442,7 @@ typedef struct { #define SVGA_CAP2_TRACE_FULL_FB 0x00002000 #define SVGA_CAP2_EXTRA_REGS 0x00004000 #define SVGA_CAP2_LO_STAGING 0x00008000 +#define SVGA_CAP2_VIDEO_BLT 0x00010000 #define SVGA_CAP2_RESERVED 0x80000000 typedef enum { @@ -450,9 +451,10 @@ typedef enum { SVGABackdoorCap3dHWVersion = 2, SVGABackdoorCapDeviceCaps2 = 3, SVGABackdoorCapDevelCaps = 4, - SVGABackdoorDevelRenderer = 5, - SVGABackdoorDevelUsingISB = 6, - SVGABackdoorCapMax = 7, + SVGABackdoorCapDevCaps = 5, + SVGABackdoorDevelRenderer = 6, + SVGABackdoorDevelUsingISB = 7, + SVGABackdoorCapMax = 8, } SVGABackdoorCapType; enum { diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c deleted file mode 100644 index edd17c30d5a5..000000000000 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.c +++ /dev/null @@ -1,684 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * - **************************************************************************/ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/spinlock.h> -#include <linux/sched.h> -#include <linux/wait.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/swap.h> - -#include <drm/drm_device.h> -#include <drm/drm_file.h> -#include <drm/ttm/ttm_device.h> - -#include "ttm_memory.h" - -#define TTM_MEMORY_ALLOC_RETRIES 4 - -struct ttm_mem_global ttm_mem_glob; -EXPORT_SYMBOL(ttm_mem_glob); - -struct ttm_mem_zone { - struct kobject kobj; - struct ttm_mem_global *glob; - const char *name; - uint64_t zone_mem; - uint64_t emer_mem; - uint64_t max_mem; - uint64_t swap_limit; - uint64_t used_mem; -}; - -static struct attribute ttm_mem_sys = { - .name = "zone_memory", - .mode = S_IRUGO -}; -static struct attribute ttm_mem_emer = { - .name = "emergency_memory", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_max = { - .name = "available_memory", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_swap = { - .name = "swap_limit", - .mode = S_IRUGO | S_IWUSR -}; -static struct attribute ttm_mem_used = { - .name = "used_memory", - .mode = S_IRUGO -}; - -static void ttm_mem_zone_kobj_release(struct kobject *kobj) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - - pr_info("Zone %7s: Used memory at exit: %llu KiB\n", - zone->name, (unsigned long long)zone->used_mem >> 10); - kfree(zone); -} - -static ssize_t ttm_mem_zone_show(struct kobject *kobj, - struct attribute *attr, - char *buffer) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - uint64_t val = 0; - - spin_lock(&zone->glob->lock); - if (attr == &ttm_mem_sys) - val = zone->zone_mem; - else if (attr == &ttm_mem_emer) - val = zone->emer_mem; - else if (attr == &ttm_mem_max) - val = zone->max_mem; - else if (attr == &ttm_mem_swap) - val = zone->swap_limit; - else if (attr == &ttm_mem_used) - val = zone->used_mem; - spin_unlock(&zone->glob->lock); - - return snprintf(buffer, PAGE_SIZE, "%llu\n", - (unsigned long long) val >> 10); -} - -static void ttm_check_swapping(struct ttm_mem_global *glob); - -static ssize_t ttm_mem_zone_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t size) -{ - struct ttm_mem_zone *zone = - container_of(kobj, struct ttm_mem_zone, kobj); - int chars; - unsigned long val; - uint64_t val64; - - chars = sscanf(buffer, "%lu", &val); - if (chars == 0) - return size; - - val64 = val; - val64 <<= 10; - - spin_lock(&zone->glob->lock); - if (val64 > zone->zone_mem) - val64 = zone->zone_mem; - if (attr == &ttm_mem_emer) { - zone->emer_mem = val64; - if (zone->max_mem > val64) - zone->max_mem = val64; - } else if (attr == &ttm_mem_max) { - zone->max_mem = val64; - if (zone->emer_mem < val64) - zone->emer_mem = val64; - } else if (attr == &ttm_mem_swap) - zone->swap_limit = val64; - spin_unlock(&zone->glob->lock); - - ttm_check_swapping(zone->glob); - - return size; -} - -static struct attribute *ttm_mem_zone_attrs[] = { - &ttm_mem_sys, - &ttm_mem_emer, - &ttm_mem_max, - &ttm_mem_swap, - &ttm_mem_used, - NULL -}; - -static const struct sysfs_ops ttm_mem_zone_ops = { - .show = &ttm_mem_zone_show, - .store = &ttm_mem_zone_store -}; - -static struct kobj_type ttm_mem_zone_kobj_type = { - .release = &ttm_mem_zone_kobj_release, - .sysfs_ops = &ttm_mem_zone_ops, - .default_attrs = ttm_mem_zone_attrs, -}; - -static struct attribute ttm_mem_global_lower_mem_limit = { - .name = 
"lower_mem_limit", - .mode = S_IRUGO | S_IWUSR -}; - -static ssize_t ttm_mem_global_show(struct kobject *kobj, - struct attribute *attr, - char *buffer) -{ - struct ttm_mem_global *glob = - container_of(kobj, struct ttm_mem_global, kobj); - uint64_t val = 0; - - spin_lock(&glob->lock); - val = glob->lower_mem_limit; - spin_unlock(&glob->lock); - /* convert from number of pages to KB */ - val <<= (PAGE_SHIFT - 10); - return snprintf(buffer, PAGE_SIZE, "%llu\n", - (unsigned long long) val); -} - -static ssize_t ttm_mem_global_store(struct kobject *kobj, - struct attribute *attr, - const char *buffer, - size_t size) -{ - int chars; - uint64_t val64; - unsigned long val; - struct ttm_mem_global *glob = - container_of(kobj, struct ttm_mem_global, kobj); - - chars = sscanf(buffer, "%lu", &val); - if (chars == 0) - return size; - - val64 = val; - /* convert from KB to number of pages */ - val64 >>= (PAGE_SHIFT - 10); - - spin_lock(&glob->lock); - glob->lower_mem_limit = val64; - spin_unlock(&glob->lock); - - return size; -} - -static struct attribute *ttm_mem_global_attrs[] = { - &ttm_mem_global_lower_mem_limit, - NULL -}; - -static const struct sysfs_ops ttm_mem_global_ops = { - .show = &ttm_mem_global_show, - .store = &ttm_mem_global_store, -}; - -static struct kobj_type ttm_mem_glob_kobj_type = { - .sysfs_ops = &ttm_mem_global_ops, - .default_attrs = ttm_mem_global_attrs, -}; - -static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, - bool from_wq, uint64_t extra) -{ - unsigned int i; - struct ttm_mem_zone *zone; - uint64_t target; - - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - - if (from_wq) - target = zone->swap_limit; - else if (capable(CAP_SYS_ADMIN)) - target = zone->emer_mem; - else - target = zone->max_mem; - - target = (extra > target) ? 0ULL : target; - - if (zone->used_mem > target) - return true; - } - return false; -} - -/* - * At this point we only support a single shrink callback. - * Extend this if needed, perhaps using a linked list of callbacks. - * Note that this function is reentrant: - * many threads may try to swap out at any given time. 
- */ - -static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, - uint64_t extra, struct ttm_operation_ctx *ctx) -{ - int ret; - - spin_lock(&glob->lock); - - while (ttm_zones_above_swap_target(glob, from_wq, extra)) { - spin_unlock(&glob->lock); - ret = ttm_global_swapout(ctx, GFP_KERNEL); - spin_lock(&glob->lock); - if (unlikely(ret <= 0)) - break; - } - - spin_unlock(&glob->lock); -} - -static void ttm_shrink_work(struct work_struct *work) -{ - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - struct ttm_mem_global *glob = - container_of(work, struct ttm_mem_global, work); - - ttm_shrink(glob, true, 0ULL, &ctx); -} - -static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); - uint64_t mem; - int ret; - - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram - si->totalhigh; - mem *= si->mem_unit; - - zone->name = "kernel"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_kernel = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} - -#ifdef CONFIG_HIGHMEM -static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone; - uint64_t mem; - int ret; - - if (si->totalhigh == 0) - return 0; - - zone = kzalloc(sizeof(*zone), GFP_KERNEL); - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram; - mem *= si->mem_unit; - - zone->name = "highmem"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_highmem = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", - zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} -#else -static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, - const struct sysinfo *si) -{ - struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); - uint64_t mem; - int ret; - - if (unlikely(!zone)) - return -ENOMEM; - - mem = si->totalram; - mem *= si->mem_unit; - - /** - * No special dma32 zone needed. - */ - - if (mem <= ((uint64_t) 1ULL << 32)) { - kfree(zone); - return 0; - } - - /* - * Limit max dma32 memory to 4GB for now - * until we can figure out how big this - * zone really is. 
- */ - - mem = ((uint64_t) 1ULL << 32); - zone->name = "dma32"; - zone->zone_mem = mem; - zone->max_mem = mem >> 1; - zone->emer_mem = (mem >> 1) + (mem >> 2); - zone->swap_limit = zone->max_mem - (mem >> 3); - zone->used_mem = 0; - zone->glob = glob; - glob->zone_dma32 = zone; - ret = kobject_init_and_add( - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); - if (unlikely(ret != 0)) { - kobject_put(&zone->kobj); - return ret; - } - glob->zones[glob->num_zones++] = zone; - return 0; -} -#endif - -int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev) -{ - struct sysinfo si; - int ret; - int i; - struct ttm_mem_zone *zone; - - spin_lock_init(&glob->lock); - glob->swap_queue = create_singlethread_workqueue("ttm_swap"); - INIT_WORK(&glob->work, ttm_shrink_work); - - ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type, - &dev->kobj, "memory_accounting"); - if (unlikely(ret != 0)) { - kobject_put(&glob->kobj); - return ret; - } - - si_meminfo(&si); - - spin_lock(&glob->lock); - /* set it as 0 by default to keep original behavior of OOM */ - glob->lower_mem_limit = 0; - spin_unlock(&glob->lock); - - ret = ttm_mem_init_kernel_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#ifdef CONFIG_HIGHMEM - ret = ttm_mem_init_highmem_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#else - ret = ttm_mem_init_dma32_zone(glob, &si); - if (unlikely(ret != 0)) - goto out_no_zone; -#endif - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - pr_info("Zone %7s: Available graphics memory: %llu KiB\n", - zone->name, (unsigned long long)zone->max_mem >> 10); - } - return 0; -out_no_zone: - ttm_mem_global_release(glob); - return ret; -} - -void ttm_mem_global_release(struct ttm_mem_global *glob) -{ - struct ttm_mem_zone *zone; - unsigned int i; - - flush_workqueue(glob->swap_queue); - destroy_workqueue(glob->swap_queue); - glob->swap_queue = NULL; - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - kobject_del(&zone->kobj); - kobject_put(&zone->kobj); - } - kobject_del(&glob->kobj); - kobject_put(&glob->kobj); - memset(glob, 0, sizeof(*glob)); -} - -static void ttm_check_swapping(struct ttm_mem_global *glob) -{ - bool needs_swapping = false; - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (zone->used_mem > zone->swap_limit) { - needs_swapping = true; - break; - } - } - - spin_unlock(&glob->lock); - - if (unlikely(needs_swapping)) - (void)queue_work(glob->swap_queue, &glob->work); - -} - -static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t amount) -{ - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - zone->used_mem -= amount; - } - spin_unlock(&glob->lock); -} - -void ttm_mem_global_free(struct ttm_mem_global *glob, - uint64_t amount) -{ - return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount); -} -EXPORT_SYMBOL(ttm_mem_global_free); - -/* - * check if the available mem is under lower memory limit - * - * a. if no swap disk at all or free swap space is under swap_mem_limit - * but available system mem is bigger than sys_mem_limit, allow TTM - * allocation; - * - * b. if the available system mem is less than sys_mem_limit but free - * swap disk is bigger than swap_mem_limit, allow TTM allocation. 
- */ -bool -ttm_check_under_lowerlimit(struct ttm_mem_global *glob, - uint64_t num_pages, - struct ttm_operation_ctx *ctx) -{ - int64_t available; - - /* We allow over commit during suspend */ - if (ctx->force_alloc) - return false; - - available = get_nr_swap_pages() + si_mem_available(); - available -= num_pages; - if (available < glob->lower_mem_limit) - return true; - - return false; -} - -static int ttm_mem_global_reserve(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t amount, bool reserve) -{ - uint64_t limit; - int ret = -ENOMEM; - unsigned int i; - struct ttm_mem_zone *zone; - - spin_lock(&glob->lock); - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - - limit = (capable(CAP_SYS_ADMIN)) ? - zone->emer_mem : zone->max_mem; - - if (zone->used_mem > limit) - goto out_unlock; - } - - if (reserve) { - for (i = 0; i < glob->num_zones; ++i) { - zone = glob->zones[i]; - if (single_zone && zone != single_zone) - continue; - zone->used_mem += amount; - } - } - - ret = 0; -out_unlock: - spin_unlock(&glob->lock); - ttm_check_swapping(glob); - - return ret; -} - - -static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, - struct ttm_mem_zone *single_zone, - uint64_t memory, - struct ttm_operation_ctx *ctx) -{ - int count = TTM_MEMORY_ALLOC_RETRIES; - - while (unlikely(ttm_mem_global_reserve(glob, - single_zone, - memory, true) - != 0)) { - if (ctx->no_wait_gpu) - return -ENOMEM; - if (unlikely(count-- == 0)) - return -ENOMEM; - ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx); - } - - return 0; -} - -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx) -{ - /** - * Normal allocations of kernel memory are registered in - * the kernel zone. - */ - - return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx); -} -EXPORT_SYMBOL(ttm_mem_global_alloc); - -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx) -{ - struct ttm_mem_zone *zone = NULL; - - /** - * Page allocations may be registed in a single zone - * only if highmem or !dma32. 
- */ - -#ifdef CONFIG_HIGHMEM - if (PageHighMem(page) && glob->zone_highmem != NULL) - zone = glob->zone_highmem; -#else - if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) - zone = glob->zone_kernel; -#endif - return ttm_mem_global_alloc_zone(glob, zone, size, ctx); -} - -void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, - uint64_t size) -{ - struct ttm_mem_zone *zone = NULL; - -#ifdef CONFIG_HIGHMEM - if (PageHighMem(page) && glob->zone_highmem != NULL) - zone = glob->zone_highmem; -#else - if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) - zone = glob->zone_kernel; -#endif - ttm_mem_global_free_zone(glob, zone, size); -} - -size_t ttm_round_pot(size_t size) -{ - if ((size & (size - 1)) == 0) - return size; - else if (size > PAGE_SIZE) - return PAGE_ALIGN(size); - else { - size_t tmp_size = 4; - - while (tmp_size < size) - tmp_size <<= 1; - - return tmp_size; - } - return 0; -} -EXPORT_SYMBOL(ttm_round_pot); diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h deleted file mode 100644 index c50dba774485..000000000000 --- a/drivers/gpu/drm/vmwgfx/ttm_memory.h +++ /dev/null @@ -1,96 +0,0 @@ -/************************************************************************** - * - * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#ifndef TTM_MEMORY_H -#define TTM_MEMORY_H - -#include <linux/workqueue.h> -#include <linux/spinlock.h> -#include <linux/bug.h> -#include <linux/wait.h> -#include <linux/errno.h> -#include <linux/kobject.h> -#include <linux/mm.h> - -#include <drm/ttm/ttm_bo_api.h> - -/** - * struct ttm_mem_global - Global memory accounting structure. - * - * @shrink: A single callback to shrink TTM memory usage. Extend this - * to a linked list to be able to handle multiple callbacks when needed. - * @swap_queue: A workqueue to handle shrinking in low memory situations. We - * need a separate workqueue since it will spend a lot of time waiting - * for the GPU, and this will otherwise block other workqueue tasks(?) - * At this point we use only a single-threaded workqueue. - * @work: The workqueue callback for the shrink queue. 
- * @lock: Lock to protect the @shrink - and the memory accounting members, - * that is, essentially the whole structure with some exceptions. - * @lower_mem_limit: include lower limit of swap space and lower limit of - * system memory. - * @zones: Array of pointers to accounting zones. - * @num_zones: Number of populated entries in the @zones array. - * @zone_kernel: Pointer to the kernel zone. - * @zone_highmem: Pointer to the highmem zone if there is one. - * @zone_dma32: Pointer to the dma32 zone if there is one. - * - * Note that this structure is not per device. It should be global for all - * graphics devices. - */ - -#define TTM_MEM_MAX_ZONES 2 -struct ttm_mem_zone; -extern struct ttm_mem_global { - struct kobject kobj; - struct workqueue_struct *swap_queue; - struct work_struct work; - spinlock_t lock; - uint64_t lower_mem_limit; - struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; - unsigned int num_zones; - struct ttm_mem_zone *zone_kernel; -#ifdef CONFIG_HIGHMEM - struct ttm_mem_zone *zone_highmem; -#else - struct ttm_mem_zone *zone_dma32; -#endif -} ttm_mem_glob; - -int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev); -void ttm_mem_global_release(struct ttm_mem_global *glob); -int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); -int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size, - struct ttm_operation_ctx *ctx); -void ttm_mem_global_free_page(struct ttm_mem_global *glob, - struct page *page, uint64_t size); -size_t ttm_round_pot(size_t size); -bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, - struct ttm_operation_ctx *ctx); -#endif diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 04789b2bb2a2..26a55fef1ab5 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -48,7 +48,11 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/atomic.h> +#include <linux/module.h> #include "ttm_object.h" +#include "vmwgfx_drv.h" + +MODULE_IMPORT_NS(DMA_BUF); /** * struct ttm_object_file @@ -70,7 +74,7 @@ struct ttm_object_file { struct ttm_object_device *tdev; spinlock_t lock; struct list_head ref_list; - struct drm_open_hash ref_hash[TTM_REF_NUM]; + struct vmwgfx_open_hash ref_hash; struct kref refcount; }; @@ -88,12 +92,10 @@ struct ttm_object_file { struct ttm_object_device { spinlock_t object_lock; - struct drm_open_hash object_hash; + struct vmwgfx_open_hash object_hash; atomic_t object_count; - struct ttm_mem_global *mem_glob; struct dma_buf_ops ops; void (*dmabuf_release)(struct dma_buf *dma_buf); - size_t dma_buf_size; struct idr idr; }; @@ -120,10 +122,9 @@ struct ttm_object_device { struct ttm_ref_object { struct rcu_head rcu_head; - struct drm_hash_item hash; + struct vmwgfx_hash_item hash; struct list_head head; struct kref kref; - enum ttm_ref_type ref_type; struct ttm_base_object *obj; struct ttm_object_file *tfile; }; @@ -159,9 +160,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, struct ttm_base_object *base, bool shareable, enum ttm_object_type object_type, - void (*refcount_release) (struct ttm_base_object **), - void (*ref_obj_release) (struct ttm_base_object *, - enum ttm_ref_type ref_type)) + void (*refcount_release) (struct ttm_base_object **)) { struct ttm_object_device *tdev = tfile->tdev; int ret; @@ -169,7 +168,6 @@ int 
ttm_base_object_init(struct ttm_object_file *tfile, base->shareable = shareable; base->tfile = ttm_object_file_ref(tfile); base->refcount_release = refcount_release; - base->ref_obj_release = ref_obj_release; base->object_type = object_type; kref_init(&base->refcount); idr_preload(GFP_KERNEL); @@ -181,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, return ret; base->handle = ret; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); + ret = ttm_ref_object_add(tfile, base, NULL, false); if (unlikely(ret != 0)) goto out_err1; @@ -244,12 +242,12 @@ void ttm_base_object_unref(struct ttm_base_object **p_base) struct ttm_base_object * ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key) { - struct drm_hash_item *hash; - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; + struct vmwgfx_hash_item *hash; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; int ret; rcu_read_lock(); - ret = drm_ht_find_item_rcu(ht, key, &hash); + ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); if (ret) { rcu_read_unlock(); return NULL; @@ -264,12 +262,12 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, uint32_t key) { struct ttm_base_object *base = NULL; - struct drm_hash_item *hash; - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; + struct vmwgfx_hash_item *hash; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; int ret; rcu_read_lock(); - ret = drm_ht_find_item_rcu(ht, key, &hash); + ret = vmwgfx_ht_find_item_rcu(ht, key, &hash); if (likely(ret == 0)) { base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; @@ -296,64 +294,14 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key) return base; } -/** - * ttm_ref_object_exists - Check whether a caller has a valid ref object - * (has opened) a base object. - * - * @tfile: Pointer to a struct ttm_object_file identifying the caller. - * @base: Pointer to a struct base object. - * - * Checks wether the caller identified by @tfile has put a valid USAGE - * reference object on the base object identified by @base. - */ -bool ttm_ref_object_exists(struct ttm_object_file *tfile, - struct ttm_base_object *base) -{ - struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; - struct drm_hash_item *hash; - struct ttm_ref_object *ref; - - rcu_read_lock(); - if (unlikely(drm_ht_find_item_rcu(ht, base->handle, &hash) != 0)) - goto out_false; - - /* - * Verify that the ref object is really pointing to our base object. - * Our base object could actually be dead, and the ref object pointing - * to another base object with the same handle. - */ - ref = drm_hash_entry(hash, struct ttm_ref_object, hash); - if (unlikely(base != ref->obj)) - goto out_false; - - /* - * Verify that the ref->obj pointer was actually valid! 
- */ - rmb(); - if (unlikely(kref_read(&ref->kref) == 0)) - goto out_false; - - rcu_read_unlock(); - return true; - - out_false: - rcu_read_unlock(); - return false; -} - int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed, + bool *existed, bool require_existed) { - struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; struct ttm_ref_object *ref; - struct drm_hash_item *hash; - struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; + struct vmwgfx_hash_item *hash; int ret = -EINVAL; if (base->tfile != tfile && !base->shareable) @@ -364,7 +312,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, while (ret == -EINVAL) { rcu_read_lock(); - ret = drm_ht_find_item_rcu(ht, base->handle, &hash); + ret = vmwgfx_ht_find_item_rcu(ht, base->handle, &hash); if (ret == 0) { ref = drm_hash_entry(hash, struct ttm_ref_object, hash); @@ -378,24 +326,18 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, if (require_existed) return -EPERM; - ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), - &ctx); - if (unlikely(ret != 0)) - return ret; ref = kmalloc(sizeof(*ref), GFP_KERNEL); if (unlikely(ref == NULL)) { - ttm_mem_global_free(mem_glob, sizeof(*ref)); return -ENOMEM; } ref->hash.key = base->handle; ref->obj = base; ref->tfile = tfile; - ref->ref_type = ref_type; kref_init(&ref->kref); spin_lock(&tfile->lock); - ret = drm_ht_insert_item_rcu(ht, &ref->hash); + ret = vmwgfx_ht_insert_item_rcu(ht, &ref->hash); if (likely(ret == 0)) { list_add_tail(&ref->head, &tfile->ref_list); @@ -409,7 +351,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, spin_unlock(&tfile->lock); BUG_ON(ret != -EINVAL); - ttm_mem_global_free(mem_glob, sizeof(*ref)); kfree(ref); } @@ -421,35 +362,29 @@ ttm_ref_object_release(struct kref *kref) { struct ttm_ref_object *ref = container_of(kref, struct ttm_ref_object, kref); - struct ttm_base_object *base = ref->obj; struct ttm_object_file *tfile = ref->tfile; - struct drm_open_hash *ht; - struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; + struct vmwgfx_open_hash *ht; - ht = &tfile->ref_hash[ref->ref_type]; - (void)drm_ht_remove_item_rcu(ht, &ref->hash); + ht = &tfile->ref_hash; + (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash); list_del(&ref->head); spin_unlock(&tfile->lock); - if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) - base->ref_obj_release(base, ref->ref_type); - ttm_base_object_unref(&ref->obj); - ttm_mem_global_free(mem_glob, sizeof(*ref)); kfree_rcu(ref, rcu_head); spin_lock(&tfile->lock); } int ttm_ref_object_base_unref(struct ttm_object_file *tfile, - unsigned long key, enum ttm_ref_type ref_type) + unsigned long key) { - struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; + struct vmwgfx_open_hash *ht = &tfile->ref_hash; struct ttm_ref_object *ref; - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; int ret; spin_lock(&tfile->lock); - ret = drm_ht_find_item(ht, key, &hash); + ret = vmwgfx_ht_find_item(ht, key, &hash); if (unlikely(ret != 0)) { spin_unlock(&tfile->lock); return -EINVAL; @@ -464,7 +399,6 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) { struct ttm_ref_object *ref; struct list_head *list; - unsigned int i; struct ttm_object_file *tfile = *p_tfile; *p_tfile = NULL; @@ -482,8 +416,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) } spin_unlock(&tfile->lock); 
- for (i = 0; i < TTM_REF_NUM; ++i) - drm_ht_remove(&tfile->ref_hash[i]); + vmwgfx_ht_remove(&tfile->ref_hash); ttm_object_file_unref(&tfile); } @@ -492,8 +425,6 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, unsigned int hash_order) { struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); - unsigned int i; - unsigned int j = 0; int ret; if (unlikely(tfile == NULL)) @@ -504,18 +435,13 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, kref_init(&tfile->refcount); INIT_LIST_HEAD(&tfile->ref_list); - for (i = 0; i < TTM_REF_NUM; ++i) { - ret = drm_ht_create(&tfile->ref_hash[i], hash_order); - if (ret) { - j = i; - goto out_err; - } - } + ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order); + if (ret) + goto out_err; return tfile; out_err: - for (i = 0; i < j; ++i) - drm_ht_remove(&tfile->ref_hash[i]); + vmwgfx_ht_remove(&tfile->ref_hash); kfree(tfile); @@ -523,8 +449,7 @@ out_err: } struct ttm_object_device * -ttm_object_device_init(struct ttm_mem_global *mem_glob, - unsigned int hash_order, +ttm_object_device_init(unsigned int hash_order, const struct dma_buf_ops *ops) { struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); @@ -533,19 +458,24 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob, if (unlikely(tdev == NULL)) return NULL; - tdev->mem_glob = mem_glob; spin_lock_init(&tdev->object_lock); atomic_set(&tdev->object_count, 0); - ret = drm_ht_create(&tdev->object_hash, hash_order); + ret = vmwgfx_ht_create(&tdev->object_hash, hash_order); if (ret != 0) goto out_no_object_hash; - idr_init_base(&tdev->idr, 1); + /* + * Our base is at VMWGFX_NUM_MOB + 1 because we want to create + * a seperate namespace for GEM handles (which are + * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctl's + * can take either handle as an argument so we want to + * easily be able to tell whether the handle refers to a + * GEM buffer or a surface. + */ + idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1); tdev->ops = *ops; tdev->dmabuf_release = tdev->ops.release; tdev->ops.release = ttm_prime_dmabuf_release; - tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) + - ttm_round_pot(sizeof(struct file)); return tdev; out_no_object_hash: @@ -561,7 +491,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) WARN_ON_ONCE(!idr_is_empty(&tdev->idr)); idr_destroy(&tdev->idr); - drm_ht_remove(&tdev->object_hash); + vmwgfx_ht_remove(&tdev->object_hash); kfree(tdev); } @@ -630,7 +560,6 @@ static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf) if (prime->dma_buf == dma_buf) prime->dma_buf = NULL; mutex_unlock(&prime->mutex); - ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size); ttm_base_object_unref(&base); } @@ -664,7 +593,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, prime = (struct ttm_prime_object *) dma_buf->priv; base = &prime->base; *handle = base->handle; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); + ret = ttm_ref_object_add(tfile, base, NULL, false); dma_buf_put(dma_buf); @@ -712,30 +641,18 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, dma_buf = prime->dma_buf; if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; exp_info.ops = &tdev->ops; exp_info.size = prime->size; exp_info.flags = flags; exp_info.priv = prime; /* - * Need to create a new dma_buf, with memory accounting. 
+ * Need to create a new dma_buf */ - ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size, - &ctx); - if (unlikely(ret != 0)) { - mutex_unlock(&prime->mutex); - goto out_unref; - } dma_buf = dma_buf_export(&exp_info); if (IS_ERR(dma_buf)) { ret = PTR_ERR(dma_buf); - ttm_mem_global_free(tdev->mem_glob, - tdev->dma_buf_size); mutex_unlock(&prime->mutex); goto out_unref; } @@ -770,7 +687,6 @@ out_unref: * @shareable: See ttm_base_object_init * @type: See ttm_base_object_init * @refcount_release: See ttm_base_object_init - * @ref_obj_release: See ttm_base_object_init * * Initializes an object which is compatible with the drm_prime model * for data sharing between processes and devices. @@ -778,9 +694,7 @@ out_unref: int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, struct ttm_prime_object *prime, bool shareable, enum ttm_object_type type, - void (*refcount_release) (struct ttm_base_object **), - void (*ref_obj_release) (struct ttm_base_object *, - enum ttm_ref_type ref_type)) + void (*refcount_release) (struct ttm_base_object **)) { mutex_init(&prime->mutex); prime->size = PAGE_ALIGN(size); @@ -789,6 +703,5 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, prime->refcount_release = refcount_release; return ttm_base_object_init(tfile, &prime->base, shareable, ttm_prime_type, - ttm_prime_refcount_release, - ref_obj_release); + ttm_prime_refcount_release); } diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h index 49b064f0cb19..4c8700027c6d 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.h +++ b/drivers/gpu/drm/vmwgfx/ttm_object.h @@ -42,31 +42,7 @@ #include <linux/list.h> #include <linux/rcupdate.h> -#include <drm/drm_hashtab.h> - -#include "ttm_memory.h" - -/** - * enum ttm_ref_type - * - * Describes what type of reference a ref object holds. - * - * TTM_REF_USAGE is a simple refcount on a base object. - * - * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a - * buffer object. - * - * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a - * buffer object. - * - */ - -enum ttm_ref_type { - TTM_REF_USAGE, - TTM_REF_SYNCCPU_READ, - TTM_REF_SYNCCPU_WRITE, - TTM_REF_NUM -}; +#include "vmwgfx_hashtab.h" /** * enum ttm_object_type @@ -78,7 +54,6 @@ enum ttm_ref_type { enum ttm_object_type { ttm_fence_type, - ttm_buffer_type, ttm_lock_type, ttm_prime_type, ttm_driver_type0 = 256, @@ -129,8 +104,6 @@ struct ttm_base_object { struct ttm_object_file *tfile; struct kref refcount; void (*refcount_release) (struct ttm_base_object **base); - void (*ref_obj_release) (struct ttm_base_object *base, - enum ttm_ref_type ref_type); u32 handle; enum ttm_object_type object_type; u32 shareable; @@ -179,11 +152,7 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile, bool shareable, enum ttm_object_type type, void (*refcount_release) (struct ttm_base_object - **), - void (*ref_obj_release) (struct ttm_base_object - *, - enum ttm_ref_type - ref_type)); + **)); /** * ttm_base_object_lookup @@ -247,12 +216,9 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); */ extern int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed, + bool *existed, bool require_existed); -extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, - struct ttm_base_object *base); - /** * ttm_ref_object_base_unref * @@ -265,8 +231,7 @@ extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, * will be unreferenced. 
*/ extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, - unsigned long key, - enum ttm_ref_type ref_type); + unsigned long key); /** * ttm_object_file_init - initialize a struct ttm_object file @@ -297,7 +262,6 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); /** * ttm_object device init - initialize a struct ttm_object_device * - * @mem_glob: struct ttm_mem_global for memory accounting. * @hash_order: Order of hash table used to hash the base objects. * @ops: DMA buf ops for prime objects of this device. * @@ -306,8 +270,7 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile); */ extern struct ttm_object_device * -ttm_object_device_init(struct ttm_mem_global *mem_glob, - unsigned int hash_order, +ttm_object_device_init(unsigned int hash_order, const struct dma_buf_ops *ops); /** @@ -332,10 +295,7 @@ extern int ttm_prime_object_init(struct ttm_object_file *tfile, bool shareable, enum ttm_object_type type, void (*refcount_release) - (struct ttm_base_object **), - void (*ref_obj_release) - (struct ttm_base_object *, - enum ttm_ref_type ref_type)); + (struct ttm_base_object **)); static inline enum ttm_object_type ttm_base_object_type(struct ttm_base_object *base) @@ -353,13 +313,6 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile, #define ttm_prime_object_kfree(__obj, __prime) \ kfree_rcu(__obj, __prime.base.rhead) -/* - * Extra memory required by the base object's idr storage, which is allocated - * separately from the base object itself. We estimate an on-average 128 bytes - * per idr. - */ -#define TTM_OBJ_EXTRA_SIZE 128 - struct ttm_base_object * ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index 6f27d69bad0e..ae2de914eb89 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -354,6 +354,27 @@ void vmw_binding_add(struct vmw_ctx_binding_state *cbs, } /** + * vmw_binding_cb_offset_update: Update the offset of a cb binding + * + * @cbs: Pointer to the context binding state tracker. + * @shader_slot: The shader slot of the binding. + * @slot: The slot of the binding. + * @offsetInBytes: The new offset of the binding. + * + * Updates the offset of an existing cb binding in the context binding + * state structure @cbs. + */ +void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs, + u32 shader_slot, u32 slot, u32 offsetInBytes) +{ + struct vmw_ctx_bindinfo *loc = + vmw_binding_loc(cbs, vmw_ctx_binding_cb, shader_slot, slot); + struct vmw_ctx_bindinfo_cb *loc_cb = + (struct vmw_ctx_bindinfo_cb *)((u8 *) loc); + loc_cb->offset = offsetInBytes; +} + +/** * vmw_binding_add_uav_index - Add UAV index for tracking. * @cbs: Pointer to the context binding state tracker. * @slot: UAV type to which bind this index. 
@@ -1070,7 +1091,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs) size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); - vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS); + vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv)); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); @@ -1100,7 +1121,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs) size_t cmd_size, view_id_size; const struct vmw_resource *ctx = vmw_cbs_context(cbs); - vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS); + vmw_collect_view_ids(cbs, loc, vmw_max_num_uavs(cbs->dev_priv)); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); @@ -1327,8 +1348,7 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) } /** - * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with - * memory accounting. + * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state. * * @dev_priv: Pointer to a device private structure. * @@ -1338,20 +1358,9 @@ struct vmw_ctx_binding_state * vmw_binding_state_alloc(struct vmw_private *dev_priv) { struct vmw_ctx_binding_state *cbs; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - int ret; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs), - &ctx); - if (ret) - return ERR_PTR(ret); cbs = vzalloc(sizeof(*cbs)); if (!cbs) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); return ERR_PTR(-ENOMEM); } @@ -1362,17 +1371,13 @@ vmw_binding_state_alloc(struct vmw_private *dev_priv) } /** - * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its - * memory accounting info. + * vmw_binding_state_free - Free a struct vmw_ctx_binding_state. * * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. */ void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) { - struct vmw_private *dev_priv = cbs->dev_priv; - vfree(cbs); - ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h index dcb71fd0bb3b..85b90f7d398d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h @@ -200,7 +200,7 @@ struct vmw_dx_shader_bindings { * @splice_index: The device splice index set by user-space. */ struct vmw_ctx_bindinfo_uav { - struct vmw_ctx_bindinfo_view views[SVGA3D_MAX_UAVIEWS]; + struct vmw_ctx_bindinfo_view views[SVGA3D_DX11_1_MAX_UAVIEWS]; uint32 index; }; @@ -217,6 +217,8 @@ struct vmw_ctx_bindinfo_so { extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs, const struct vmw_ctx_bindinfo *ci, u32 shader_slot, u32 slot); +extern void vmw_binding_cb_offset_update(struct vmw_ctx_binding_state *cbs, + u32 shader_slot, u32 slot, u32 offsetInBytes); extern void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot, uint32 splice_index); extern void diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index fd007f1c1776..15fead85450c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -33,18 +33,6 @@ /** - * struct vmw_user_buffer_object - User-space-visible buffer object - * - * @prime: The prime object providing user visibility. 
- * @vbo: The struct vmw_buffer_object - */ -struct vmw_user_buffer_object { - struct ttm_prime_object prime; - struct vmw_buffer_object vbo; -}; - - -/** * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct * vmw_buffer_object. * @@ -60,23 +48,6 @@ vmw_buffer_object(struct ttm_buffer_object *bo) /** - * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct - * vmw_user_buffer_object. - * - * @bo: Pointer to the TTM buffer object. - * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer - * object. - */ -static struct vmw_user_buffer_object * -vmw_user_buffer_object(struct ttm_buffer_object *bo) -{ - struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); - - return container_of(vmw_bo, struct vmw_user_buffer_object, vbo); -} - - -/** * vmw_bo_pin_in_placement - Validate a buffer to placement. * * @dev_priv: Driver private. @@ -392,39 +363,6 @@ void vmw_bo_unmap(struct vmw_buffer_object *vbo) /** - * vmw_bo_acc_size - Calculate the pinned memory usage of buffers - * - * @dev_priv: Pointer to a struct vmw_private identifying the device. - * @size: The requested buffer size. - * @user: Whether this is an ordinary dma buffer or a user dma buffer. - */ -static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size, - bool user) -{ - static size_t struct_size, user_struct_size; - size_t num_pages = PFN_UP(size); - size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *)); - - if (unlikely(struct_size == 0)) { - size_t backend_size = ttm_round_pot(vmw_tt_size); - - struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_buffer_object)); - user_struct_size = backend_size + - ttm_round_pot(sizeof(struct vmw_user_buffer_object)) + - TTM_OBJ_EXTRA_SIZE; - } - - if (dev_priv->map_mode == vmw_dma_alloc_coherent) - page_array_size += - ttm_round_pot(num_pages * sizeof(dma_addr_t)); - - return ((user) ? user_struct_size : struct_size) + - page_array_size; -} - - -/** * vmw_bo_bo_free - vmw buffer object destructor * * @bo: Pointer to the embedded struct ttm_buffer_object @@ -436,27 +374,10 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo) WARN_ON(vmw_bo->dirty); WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree)); vmw_bo_unmap(vmw_bo); - dma_resv_fini(&bo->base._resv); + drm_gem_object_release(&bo->base); kfree(vmw_bo); } - -/** - * vmw_user_bo_destroy - vmw buffer object destructor - * - * @bo: Pointer to the embedded struct ttm_buffer_object - */ -static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) -{ - struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo); - struct vmw_buffer_object *vbo = &vmw_user_bo->vbo; - - WARN_ON(vbo->dirty); - WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); - vmw_bo_unmap(vbo); - ttm_prime_object_kfree(vmw_user_bo, prime); -} - /** * vmw_bo_create_kernel - Create a pinned BO for internal kernel use. 
* @@ -471,33 +392,27 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, struct ttm_placement *placement, struct ttm_buffer_object **p_bo) { - struct ttm_operation_ctx ctx = { false, false }; + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; struct ttm_buffer_object *bo; - size_t acc_size; + struct drm_device *vdev = &dev_priv->drm; int ret; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (unlikely(!bo)) return -ENOMEM; - acc_size = ttm_round_pot(sizeof(*bo)); - acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *)); - acc_size += ttm_round_pot(sizeof(struct ttm_tt)); - - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (unlikely(ret)) - goto error_free; - + size = ALIGN(size, PAGE_SIZE); - bo->base.size = size; - dma_resv_init(&bo->base._resv); - drm_vma_node_reset(&bo->base.vma_node); + drm_gem_private_object_init(vdev, &bo->base, size); ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, - ttm_bo_type_device, placement, 0, + ttm_bo_type_kernel, placement, 0, &ctx, NULL, NULL, NULL); if (unlikely(ret)) - goto error_account; + goto error_free; ttm_bo_pin(bo); ttm_bo_unreserve(bo); @@ -505,14 +420,38 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, return 0; -error_account: - ttm_mem_global_free(&ttm_mem_glob, acc_size); - error_free: kfree(bo); return ret; } +int vmw_bo_create(struct vmw_private *vmw, + size_t size, struct ttm_placement *placement, + bool interruptible, bool pin, + void (*bo_free)(struct ttm_buffer_object *bo), + struct vmw_buffer_object **p_bo) +{ + int ret; + + *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL); + if (unlikely(!*p_bo)) { + DRM_ERROR("Failed to allocate a buffer.\n"); + return -ENOMEM; + } + + ret = vmw_bo_init(vmw, *p_bo, size, + placement, interruptible, pin, + bo_free); + if (unlikely(ret != 0)) + goto out_error; + + return ret; +out_error: + kfree(*p_bo); + *p_bo = NULL; + return ret; +} + /** * vmw_bo_init - Initialize a vmw buffer object * @@ -533,192 +472,44 @@ int vmw_bo_init(struct vmw_private *dev_priv, bool interruptible, bool pin, void (*bo_free)(struct ttm_buffer_object *bo)) { - struct ttm_operation_ctx ctx = { interruptible, false }; + struct ttm_operation_ctx ctx = { + .interruptible = interruptible, + .no_wait_gpu = false + }; struct ttm_device *bdev = &dev_priv->bdev; - size_t acc_size; + struct drm_device *vdev = &dev_priv->drm; int ret; - bool user = (bo_free == &vmw_user_bo_destroy); - - WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free))); - acc_size = vmw_bo_acc_size(dev_priv, size, user); + WARN_ON_ONCE(!bo_free); memset(vmw_bo, 0, sizeof(*vmw_bo)); BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); vmw_bo->base.priority = 3; vmw_bo->res_tree = RB_ROOT; - ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); - if (unlikely(ret)) - return ret; - - vmw_bo->base.base.size = size; - dma_resv_init(&vmw_bo->base.base._resv); - drm_vma_node_reset(&vmw_bo->base.base.vma_node); + size = ALIGN(size, PAGE_SIZE); + drm_gem_private_object_init(vdev, &vmw_bo->base.base, size); ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size, - ttm_bo_type_device, placement, + ttm_bo_type_device, + placement, 0, &ctx, NULL, NULL, bo_free); if (unlikely(ret)) { - ttm_mem_global_free(&ttm_mem_glob, acc_size); return ret; } if (pin) ttm_bo_pin(&vmw_bo->base); ttm_bo_unreserve(&vmw_bo->base); - return 0; -} - - -/** - * vmw_user_bo_release - TTM reference base object release callback for - * vmw user buffer objects - * - * @p_base: The TTM base object 
pointer about to be unreferenced. - * - * Clears the TTM base object pointer and drops the reference the - * base object has on the underlying struct vmw_buffer_object. - */ -static void vmw_user_bo_release(struct ttm_base_object **p_base) -{ - struct vmw_user_buffer_object *vmw_user_bo; - struct ttm_base_object *base = *p_base; - - *p_base = NULL; - - if (unlikely(base == NULL)) - return; - - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, - prime.base); - ttm_bo_put(&vmw_user_bo->vbo.base); -} - - -/** - * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback - * for vmw user buffer objects - * - * @base: Pointer to the TTM base object - * @ref_type: Reference type of the reference reaching zero. - * - * Called when user-space drops its last synccpu reference on the buffer - * object, Either explicitly or as part of a cleanup file close. - */ -static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base, - enum ttm_ref_type ref_type) -{ - struct vmw_user_buffer_object *user_bo; - user_bo = container_of(base, struct vmw_user_buffer_object, prime.base); - - switch (ref_type) { - case TTM_REF_SYNCCPU_WRITE: - atomic_dec(&user_bo->vbo.cpu_writers); - break; - default: - WARN_ONCE(true, "Undefined buffer object reference release.\n"); - } -} - - -/** - * vmw_user_bo_alloc - Allocate a user buffer object - * - * @dev_priv: Pointer to a struct device private. - * @tfile: Pointer to a struct ttm_object_file on which to register the user - * object. - * @size: Size of the buffer object. - * @shareable: Boolean whether the buffer is shareable with other open files. - * @handle: Pointer to where the handle value should be assigned. - * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer - * should be assigned. - * @p_base: The TTM base object pointer about to be allocated. - * Return: Zero on success, negative error code on error. - */ -int vmw_user_bo_alloc(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t size, - bool shareable, - uint32_t *handle, - struct vmw_buffer_object **p_vbo, - struct ttm_base_object **p_base) -{ - struct vmw_user_buffer_object *user_bo; - int ret; - - user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); - if (unlikely(!user_bo)) { - DRM_ERROR("Failed to allocate a buffer.\n"); - return -ENOMEM; - } - - ret = vmw_bo_init(dev_priv, &user_bo->vbo, size, - (dev_priv->has_mob) ? - &vmw_sys_placement : - &vmw_vram_sys_placement, true, false, - &vmw_user_bo_destroy); - if (unlikely(ret != 0)) - return ret; - - ttm_bo_get(&user_bo->vbo.base); - ret = ttm_prime_object_init(tfile, - size, - &user_bo->prime, - shareable, - ttm_buffer_type, - &vmw_user_bo_release, - &vmw_user_bo_ref_obj_release); - if (unlikely(ret != 0)) { - ttm_bo_put(&user_bo->vbo.base); - goto out_no_base_object; - } - - *p_vbo = &user_bo->vbo; - if (p_base) { - *p_base = &user_bo->prime.base; - kref_get(&(*p_base)->refcount); - } - *handle = user_bo->prime.base.handle; - -out_no_base_object: - return ret; -} - - -/** - * vmw_user_bo_verify_access - verify access permissions on this - * buffer object. - * - * @bo: Pointer to the buffer object being accessed - * @tfile: Identifying the caller. - */ -int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile) -{ - struct vmw_user_buffer_object *vmw_user_bo; - - if (unlikely(bo->destroy != vmw_user_bo_destroy)) - return -EPERM; - - vmw_user_bo = vmw_user_buffer_object(bo); - - /* Check that the caller has opened the object. 
*/ - if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base))) - return 0; - - DRM_ERROR("Could not grant buffer access.\n"); - return -EPERM; + return 0; } - /** - * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu + * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu * access, idling previous GPU operations on the buffer and optionally * blocking it for further command submissions. * - * @user_bo: Pointer to the buffer object being grabbed for CPU access - * @tfile: Identifying the caller. + * @vmw_bo: Pointer to the buffer object being grabbed for CPU access * @flags: Flags indicating how the grab should be performed. * Return: Zero on success, Negative error code on error. In particular, * -EBUSY will be returned if a dontblock operation is requested and the @@ -727,13 +518,11 @@ int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, * * A blocking grab will be automatically released when @tfile is closed. */ -static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, - struct ttm_object_file *tfile, +static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo, uint32_t flags) { bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); - struct ttm_buffer_object *bo = &user_bo->vbo.base; - bool existed; + struct ttm_buffer_object *bo = &vmw_bo->base; int ret; if (flags & drm_vmw_synccpu_allow_cs) { @@ -755,17 +544,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, ret = ttm_bo_wait(bo, true, nonblock); if (likely(ret == 0)) - atomic_inc(&user_bo->vbo.cpu_writers); + atomic_inc(&vmw_bo->cpu_writers); ttm_bo_unreserve(bo); if (unlikely(ret != 0)) return ret; - ret = ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_SYNCCPU_WRITE, &existed, false); - if (ret != 0 || existed) - atomic_dec(&user_bo->vbo.cpu_writers); - return ret; } @@ -773,19 +557,23 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, * vmw_user_bo_synccpu_release - Release a previous grab for CPU access, * and unblock command submission on the buffer if blocked. * + * @filp: Identifying the caller. * @handle: Handle identifying the buffer object. - * @tfile: Identifying the caller. * @flags: Flags indicating the type of release. 
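+ *
+ * Return: Zero on success, negative error code from the buffer object
+ * lookup on failure.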
 */
-static int vmw_user_bo_synccpu_release(uint32_t handle,
-				       struct ttm_object_file *tfile,
-				       uint32_t flags)
+static int vmw_user_bo_synccpu_release(struct drm_file *filp,
+				       uint32_t handle,
+				       uint32_t flags)
 {
-	if (!(flags & drm_vmw_synccpu_allow_cs))
-		return ttm_ref_object_base_unref(tfile, handle,
-						 TTM_REF_SYNCCPU_WRITE);
+	struct vmw_buffer_object *vmw_bo;
+	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
 
-	return 0;
+	if (!ret) {
+		if (!(flags & drm_vmw_synccpu_allow_cs))
+			atomic_dec(&vmw_bo->cpu_writers);
+		ttm_bo_put(&vmw_bo->base);
+	}
+
+	return ret;
 }
 
 
@@ -807,9 +595,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_synccpu_arg *arg =
 		(struct drm_vmw_synccpu_arg *) data;
 	struct vmw_buffer_object *vbo;
-	struct vmw_user_buffer_object *user_bo;
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct ttm_base_object *buffer_base;
 	int ret;
 
 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -822,16 +607,12 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 
 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
-					 &buffer_base);
+		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
 		if (unlikely(ret != 0))
 			return ret;
 
-		user_bo = container_of(vbo, struct vmw_user_buffer_object,
-				       vbo);
-		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
 		vmw_bo_unreference(&vbo);
-		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 &&
 			     ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -840,7 +621,8 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 		}
 		break;
 	case drm_vmw_synccpu_release:
-		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+		ret = vmw_user_bo_synccpu_release(file_priv,
+						  arg->handle,
 						  arg->flags);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
@@ -856,50 +638,6 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-
-/**
- * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
- * allocation functionality.
- *
- * @dev: Identifies the drm device.
- * @data: Pointer to the ioctl argument.
- * @file_priv: Identifies the caller.
- * Return: Zero on success, negative error code on error.
- *
- * This function checks the ioctl arguments for validity and allocates a
- * struct vmw_user_buffer_object bo.
- */
-int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	union drm_vmw_alloc_dmabuf_arg *arg =
-	    (union drm_vmw_alloc_dmabuf_arg *)data;
-	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
-	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-	struct vmw_buffer_object *vbo;
-	uint32_t handle;
-	int ret;
-
-	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				req->size, false, &handle, &vbo,
-				NULL);
-	if (unlikely(ret != 0))
-		goto out_no_bo;
-
-	rep->handle = handle;
-	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
-	rep->cur_gmr_id = handle;
-	rep->cur_gmr_offset = 0;
-
-	vmw_bo_unreference(&vbo);
-
-out_no_bo:
-
-	return ret;
-}
-
-
 /**
  * vmw_bo_unref_ioctl - Generic handle close ioctl.
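 * (With GEM-backed buffer objects this reduces to dropping the caller's
 * GEM handle via drm_gem_handle_delete().)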
* @@ -917,65 +655,48 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, struct drm_vmw_unref_dmabuf_arg *arg = (struct drm_vmw_unref_dmabuf_arg *)data; - return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - arg->handle, - TTM_REF_USAGE); + drm_gem_handle_delete(file_priv, arg->handle); + return 0; } /** * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle. * - * @tfile: The TTM object file the handle is registered with. + * @filp: The file the handle is registered with. * @handle: The user buffer object handle * @out: Pointer to a where a pointer to the embedded * struct vmw_buffer_object should be placed. - * @p_base: Pointer to where a pointer to the TTM base object should be - * placed, or NULL if no such pointer is required. * Return: Zero on success, Negative error code on error. * - * Both the output base object pointer and the vmw buffer object pointer - * will be refcounted. + * The vmw buffer object pointer will be refcounted. */ -int vmw_user_bo_lookup(struct ttm_object_file *tfile, - uint32_t handle, struct vmw_buffer_object **out, - struct ttm_base_object **p_base) +int vmw_user_bo_lookup(struct drm_file *filp, + uint32_t handle, + struct vmw_buffer_object **out) { - struct vmw_user_buffer_object *vmw_user_bo; - struct ttm_base_object *base; + struct drm_gem_object *gobj; - base = ttm_base_object_lookup(tfile, handle); - if (unlikely(base == NULL)) { + gobj = drm_gem_object_lookup(filp, handle); + if (!gobj) { DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", (unsigned long)handle); return -ESRCH; } - if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { - ttm_base_object_unref(&base); - DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", - (unsigned long)handle); - return -EINVAL; - } - - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, - prime.base); - ttm_bo_get(&vmw_user_bo->vbo.base); - if (p_base) - *p_base = base; - else - ttm_base_object_unref(&base); - *out = &vmw_user_bo->vbo; + *out = gem_to_vmw_bo(gobj); + ttm_bo_get(&(*out)->base); + drm_gem_object_put(gobj); return 0; } /** * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference - * @tfile: The TTM object file the handle is registered with. + * @filp: The TTM object file the handle is registered with. * @handle: The user buffer object handle. * - * This function looks up a struct vmw_user_bo and returns a pointer to the + * This function looks up a struct vmw_bo and returns a pointer to the * struct vmw_buffer_object it derives from without refcounting the pointer. * The returned pointer is only valid until vmw_user_bo_noref_release() is * called, and the object pointed to by the returned pointer may be doomed. @@ -988,52 +709,23 @@ int vmw_user_bo_lookup(struct ttm_object_file *tfile, * error pointer on failure. 
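 * (Although no refcount is reported to the caller, the GEM implementation
 * below does briefly take a TTM reference via ttm_bo_get_unless_zero();
 * callers such as the execbuf translation functions drop it again with
 * ttm_bo_put() once a validation reference has been secured.)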
*/ struct vmw_buffer_object * -vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle) +vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle) { - struct vmw_user_buffer_object *vmw_user_bo; - struct ttm_base_object *base; + struct vmw_buffer_object *vmw_bo; + struct ttm_buffer_object *bo; + struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle); - base = ttm_base_object_noref_lookup(tfile, handle); - if (!base) { + if (!gobj) { DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", (unsigned long)handle); return ERR_PTR(-ESRCH); } + vmw_bo = gem_to_vmw_bo(gobj); + bo = ttm_bo_get_unless_zero(&vmw_bo->base); + vmw_bo = vmw_buffer_object(bo); + drm_gem_object_put(gobj); - if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { - ttm_base_object_noref_release(); - DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", - (unsigned long)handle); - return ERR_PTR(-EINVAL); - } - - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, - prime.base); - return &vmw_user_bo->vbo; -} - -/** - * vmw_user_bo_reference - Open a handle to a vmw user buffer object. - * - * @tfile: The TTM object file to register the handle with. - * @vbo: The embedded vmw buffer object. - * @handle: Pointer to where the new handle should be placed. - * Return: Zero on success, Negative error code on error. - */ -int vmw_user_bo_reference(struct ttm_object_file *tfile, - struct vmw_buffer_object *vbo, - uint32_t *handle) -{ - struct vmw_user_buffer_object *user_bo; - - if (vbo->base.destroy != vmw_user_bo_destroy) - return -EINVAL; - - user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo); - - *handle = user_bo->prime.base.handle; - return ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_USAGE, NULL, false); + return vmw_bo; } @@ -1087,68 +779,15 @@ int vmw_dumb_create(struct drm_file *file_priv, int ret; args->pitch = args->width * ((args->bpp + 7) / 8); - args->size = args->pitch * args->height; + args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); - ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, - args->size, false, &args->handle, - &vbo, NULL); - if (unlikely(ret != 0)) - goto out_no_bo; + ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, + args->size, &args->handle, + &vbo); - vmw_bo_unreference(&vbo); -out_no_bo: return ret; } - -/** - * vmw_dumb_map_offset - Return the address space offset of a dumb buffer - * - * @file_priv: Pointer to a struct drm_file identifying the caller. - * @dev: Pointer to the drm device. - * @handle: Handle identifying the dumb buffer. - * @offset: The address space offset returned. - * Return: Zero on success, negative error code on failure. - * - * This is a driver callback for the core drm dumb_map_offset functionality. - */ -int vmw_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset) -{ - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct vmw_buffer_object *out_buf; - int ret; - - ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL); - if (ret != 0) - return -EINVAL; - - *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node); - vmw_bo_unreference(&out_buf); - return 0; -} - - -/** - * vmw_dumb_destroy - Destroy a dumb boffer - * - * @file_priv: Pointer to a struct drm_file identifying the caller. - * @dev: Pointer to the drm device. - * @handle: Handle identifying the dumb buffer. - * Return: Zero on success, negative error code on failure. 
- *
- * This is a driver callback for the core drm dumb_destroy functionality.
- */
-int vmw_dumb_destroy(struct drm_file *file_priv,
-		     struct drm_device *dev,
-		     uint32_t handle)
-{
-	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					 handle, TTM_REF_USAGE);
-}
-
-
 /**
  * vmw_bo_swap_notify - swapout notify callback.
  *
@@ -1157,8 +796,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 {
 	/* Is @bo embedded in a struct vmw_buffer_object? */
-	if (bo->destroy != vmw_bo_bo_free &&
-	    bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
 		return;
 
 	/* Kill any cached kernel maps before swapout */
@@ -1182,8 +820,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	struct vmw_buffer_object *vbo;
 
 	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
-	if (bo->destroy != vmw_bo_bo_free &&
-	    bo->destroy != vmw_user_bo_destroy)
+	if (!vmw_bo_is_vmw_bo(bo))
 		return;
 
 	vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -1204,3 +841,22 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	if (mem->mem_type != VMW_PL_MOB &&
 	    bo->resource->mem_type == VMW_PL_MOB)
 		vmw_resource_unbind_list(vbo);
 }
+
+/**
+ * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
+ * @bo: buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
+{
+	if (bo->destroy == &vmw_bo_bo_free ||
+	    bo->destroy == &vmw_gem_destroy)
+		return true;
+
+	return false;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 67db472d3493..a3bfbb6c3e14 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -145,6 +145,13 @@ struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
 		 (unsigned int) max,
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
+
+	if (unlikely(min >= max)) {
+		drm_warn(&dev_priv->drm,
+			 "FIFO memory is not usable. Driver failed to initialize.");
+		return ERR_PTR(-ENXIO);
+	}
+
 	return fifo;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 8381750db81b..415774fde796 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -42,7 +42,7 @@
  */
 struct vmw_cmdbuf_res {
 	struct vmw_resource *res;
-	struct drm_hash_item hash;
+	struct vmwgfx_hash_item hash;
 	struct list_head head;
 	enum vmw_cmdbuf_res_state state;
 	struct vmw_cmdbuf_res_manager *man;
@@ -59,7 +59,7 @@ struct vmw_cmdbuf_res {
 * @resources and @list are protected by the cmdbuf mutex for now.
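 * Hash keys encode the user key together with the resource type, i.e.
 * key = user_key | (res_type << 24), matching the lookups done by
 * vmw_cmdbuf_res_lookup() and vmw_cmdbuf_res_remove().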
*/ struct vmw_cmdbuf_res_manager { - struct drm_open_hash resources; + struct vmwgfx_open_hash resources; struct list_head list; struct vmw_private *dev_priv; }; @@ -81,11 +81,11 @@ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, enum vmw_cmdbuf_res_type res_type, u32 user_key) { - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; int ret; unsigned long key = user_key | (res_type << 24); - ret = drm_ht_find_item(&man->resources, key, &hash); + ret = vmwgfx_ht_find_item(&man->resources, key, &hash); if (unlikely(ret != 0)) return ERR_PTR(ret); @@ -105,7 +105,7 @@ static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man, struct vmw_cmdbuf_res *entry) { list_del(&entry->head); - WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); + WARN_ON(vmwgfx_ht_remove_item(&man->resources, &entry->hash)); vmw_resource_unreference(&entry->res); kfree(entry); } @@ -167,7 +167,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list) vmw_cmdbuf_res_free(entry->man, entry); break; case VMW_CMDBUF_RES_DEL: - ret = drm_ht_insert_item(&entry->man->resources, &entry->hash); + ret = vmwgfx_ht_insert_item(&entry->man->resources, &entry->hash); BUG_ON(ret); list_move_tail(&entry->head, &entry->man->list); entry->state = VMW_CMDBUF_RES_COMMITTED; @@ -206,7 +206,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, return -ENOMEM; cres->hash.key = user_key | (res_type << 24); - ret = drm_ht_insert_item(&man->resources, &cres->hash); + ret = vmwgfx_ht_insert_item(&man->resources, &cres->hash); if (unlikely(ret != 0)) { kfree(cres); goto out_invalid_key; @@ -244,10 +244,10 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, struct vmw_resource **res_p) { struct vmw_cmdbuf_res *entry; - struct drm_hash_item *hash; + struct vmwgfx_hash_item *hash; int ret; - ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24), + ret = vmwgfx_ht_find_item(&man->resources, user_key | (res_type << 24), &hash); if (likely(ret != 0)) return -EINVAL; @@ -260,7 +260,7 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, *res_p = NULL; break; case VMW_CMDBUF_RES_COMMITTED: - (void) drm_ht_remove_item(&man->resources, &entry->hash); + (void) vmwgfx_ht_remove_item(&man->resources, &entry->hash); list_del(&entry->head); entry->state = VMW_CMDBUF_RES_DEL; list_add_tail(&entry->head, list); @@ -295,7 +295,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) man->dev_priv = dev_priv; INIT_LIST_HEAD(&man->list); - ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); + ret = vmwgfx_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER); if (ret == 0) return man; @@ -320,26 +320,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); - drm_ht_remove(&man->resources); + vmwgfx_ht_remove(&man->resources); kfree(man); } -/** - * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed - * resource manager - * - * Returns the approximate allocation size of a command buffer managed - * resource manager. 
- */ -size_t vmw_cmdbuf_res_man_size(void) -{ - static size_t res_man_size; - - if (unlikely(res_man_size == 0)) - res_man_size = - ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) + - ttm_round_pot(sizeof(struct hlist_head) << - VMW_CMDBUF_RES_MAN_HT_ORDER); - - return res_man_size; -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 4446758b6880..e0f48cd9529b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -60,8 +60,6 @@ static int vmw_dx_context_unbind(struct vmw_resource *res, struct ttm_validate_buffer *val_buf); static int vmw_dx_context_destroy(struct vmw_resource *res); -static uint64_t vmw_user_context_size; - static const struct vmw_user_resource_conv user_context_conv = { .object_type = VMW_RES_CONTEXT, .base_obj_to_res = vmw_user_context_base_to_res, @@ -686,7 +684,6 @@ static void vmw_user_context_free(struct vmw_resource *res) { struct vmw_user_context *ctx = container_of(res, struct vmw_user_context, res); - struct vmw_private *dev_priv = res->dev_priv; if (ctx->cbs) vmw_binding_state_free(ctx->cbs); @@ -694,8 +691,6 @@ static void vmw_user_context_free(struct vmw_resource *res) (void) vmw_context_bind_dx_query(res, NULL); ttm_base_object_kfree(ctx, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); } /* @@ -720,7 +715,7 @@ int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->cid); } static int vmw_context_define(struct drm_device *dev, void *data, @@ -732,10 +727,6 @@ static int vmw_context_define(struct drm_device *dev, void *data, struct vmw_resource *tmp; struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; if (!has_sm4_context(dev_priv) && dx) { @@ -743,25 +734,8 @@ static int vmw_context_define(struct drm_device *dev, void *data, return -EINVAL; } - if (unlikely(vmw_user_context_size == 0)) - vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + - ((dev_priv->has_mob) ? 
vmw_cmdbuf_res_man_size() : 0) + - + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_context_size, - &ttm_opt_ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for context" - " creation.\n"); - goto out_ret; - } - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (unlikely(!ctx)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_context_size); ret = -ENOMEM; goto out_ret; } @@ -780,7 +754,7 @@ static int vmw_context_define(struct drm_device *dev, void *data, tmp = vmw_resource_reference(&ctx->res); ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, - &vmw_user_context_base_release, NULL); + &vmw_user_context_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 17a98db00017..16f986b6cbea 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -407,12 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. */ - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - return -ENOMEM; - - ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement, - true, true, vmw_bo_bo_free); + ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement, + true, true, vmw_bo_bo_free, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); return ret; @@ -546,8 +542,6 @@ static void vmw_hw_cotable_destroy(struct vmw_resource *res) (void) vmw_cotable_destroy(res); } -static size_t cotable_acc_size; - /** * vmw_cotable_free - Cotable resource destructor * @@ -555,10 +549,7 @@ static size_t cotable_acc_size; */ static void vmw_cotable_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; - kfree(res); - ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); } /** @@ -574,21 +565,9 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, u32 type) { struct vmw_cotable *vcotbl; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; u32 num_entries; - if (unlikely(cotable_acc_size == 0)) - cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable)); - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - cotable_acc_size, &ttm_opt_ctx); - if (unlikely(ret)) - return ERR_PTR(ret); - vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); if (unlikely(!vcotbl)) { ret = -ENOMEM; @@ -622,7 +601,6 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, out_no_init: kfree(vcotbl); out_no_alloc: - ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index ab9a1750e1df..2d59bdad0373 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -25,16 +25,16 @@ * **************************************************************************/ -#include <linux/console.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/mem_encrypt.h> +#include <linux/cc_platform.h> #include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_ioctl.h> #include <drm/drm_sysfs.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_range_manager.h> #include 
<drm/ttm/ttm_placement.h> @@ -51,9 +51,6 @@ #define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_HEIGHT 600 -#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) - - /* * Fully encoded drm commands. Might move to vmw_drm.h */ @@ -166,7 +163,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, + DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, DRM_RENDER_ALLOW), @@ -258,8 +255,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { }; static const struct pci_device_id vmw_pci_id_list[] = { - { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) }, - { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) }, + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA2) }, + { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, VMWGFX_PCI_ID_SVGA3) }, { } }; MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); @@ -367,6 +364,7 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv) [VMW_SM_4] = "SM4", [VMW_SM_4_1] = "SM4_1", [VMW_SM_5] = "SM_5", + [VMW_SM_5_1X] = "SM_5_1X", [VMW_SM_MAX] = "Invalid" }; BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1)); @@ -400,13 +398,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) * immediately succeed. This is because we're the only * user of the bo currently. */ - vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); - if (!vbo) - return -ENOMEM; - - ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, - &vmw_sys_placement, false, true, - &vmw_bo_bo_free); + ret = vmw_bo_create(dev_priv, PAGE_SIZE, + &vmw_sys_placement, false, true, + &vmw_bo_bo_free, &vbo); if (unlikely(ret != 0)) return ret; @@ -666,7 +660,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) [vmw_dma_map_bind] = "Giving up DMA mappings early."}; /* TTM currently doesn't fully support SEV encryption. */ - if (mem_encrypt_active()) + if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) return -EINVAL; if (vmw_force_coherent) @@ -987,8 +981,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) goto out_err0; } - dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, - &vmw_prime_dmabuf_ops); + dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops); if (unlikely(dev_priv->tdev == NULL)) { drm_err(&dev_priv->drm, @@ -1071,6 +1064,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) "3D will be disabled.\n"); dev_priv->has_mob = false; } + if (vmw_sys_man_init(dev_priv) != 0) { + drm_info(&dev_priv->drm, + "No MOB page table memory available. 
" + "3D will be disabled.\n"); + dev_priv->has_mob = false; + } } if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { @@ -1078,8 +1077,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->sm_type = VMW_SM_4; } - vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); - /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ if (has_sm4_context(dev_priv) && (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { @@ -1087,8 +1084,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->sm_type = VMW_SM_4_1; if (has_sm4_1_context(dev_priv) && (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { - if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) + if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) { dev_priv->sm_type = VMW_SM_5; + if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43)) + dev_priv->sm_type = VMW_SM_5_1X; + } } } @@ -1121,8 +1121,10 @@ out_no_fifo: vmw_overlay_close(dev_priv); vmw_kms_close(dev_priv); out_no_kms: - if (dev_priv->has_mob) + if (dev_priv->has_mob) { vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); + vmw_sys_man_fini(dev_priv); + } if (dev_priv->has_gmr) vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_devcaps_destroy(dev_priv); @@ -1156,7 +1158,7 @@ static void vmw_driver_unload(struct drm_device *dev) unregister_pm_notifier(&dev_priv->pm_nb); if (dev_priv->ctx.res_ht_initialized) - drm_ht_remove(&dev_priv->ctx.res_ht); + vmwgfx_ht_remove(&dev_priv->ctx.res_ht); vfree(dev_priv->ctx.cmd_bounce); if (dev_priv->enable_fb) { vmw_fb_off(dev_priv); @@ -1172,8 +1174,10 @@ static void vmw_driver_unload(struct drm_device *dev) vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_release_device_early(dev_priv); - if (dev_priv->has_mob) + if (dev_priv->has_mob) { vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); + vmw_sys_man_fini(dev_priv); + } vmw_devcaps_destroy(dev_priv); vmw_vram_manager_fini(dev_priv); ttm_device_fini(&dev_priv->bdev); @@ -1388,7 +1392,6 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - ttm_mem_global_release(&ttm_mem_glob); drm_dev_unregister(dev); vmw_driver_unload(dev); } @@ -1576,7 +1579,7 @@ static const struct file_operations vmwgfx_driver_fops = { static const struct drm_driver driver = { .driver_features = - DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, + DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM, .ioctls = vmw_ioctls, .num_ioctls = ARRAY_SIZE(vmw_ioctls), .master_set = vmw_master_set, @@ -1585,8 +1588,7 @@ static const struct drm_driver driver = { .postclose = vmw_postclose, .dumb_create = vmw_dumb_create, - .dumb_map_offset = vmw_dumb_map_offset, - .dumb_destroy = vmw_dumb_destroy, + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, .prime_fd_to_handle = vmw_prime_fd_to_handle, .prime_handle_to_fd = vmw_prime_handle_to_fd, @@ -1617,41 +1619,43 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); if (ret) - return ret; + goto out_error; ret = pcim_enable_device(pdev); if (ret) - return ret; + goto out_error; vmw = devm_drm_dev_alloc(&pdev->dev, &driver, struct vmw_private, drm); - if (IS_ERR(vmw)) - return PTR_ERR(vmw); + if (IS_ERR(vmw)) { + ret = PTR_ERR(vmw); + goto out_error; + } pci_set_drvdata(pdev, &vmw->drm); - ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); - if (ret) - return ret; - ret = vmw_driver_load(vmw, ent->device); if (ret) - return ret; + goto out_error; ret = drm_dev_register(&vmw->drm, 0); - if (ret) { - 
vmw_driver_unload(&vmw->drm); - return ret; - } + if (ret) + goto out_unload; + + vmw_debugfs_gem_init(vmw); return 0; +out_unload: + vmw_driver_unload(&vmw->drm); +out_error: + return ret; } static int __init vmwgfx_init(void) { int ret; - if (vgacon_text_force()) + if (drm_firmware_drivers_only()) return -EINVAL; ret = pci_register_driver(&vmw_pci_driver); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 858aff99a3fe..4ec2b99351cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -34,7 +34,6 @@ #include <drm/drm_auth.h> #include <drm/drm_device.h> #include <drm/drm_file.h> -#include <drm/drm_hashtab.h> #include <drm/drm_rect.h> #include <drm/ttm/ttm_bo_driver.h> @@ -43,6 +42,7 @@ #include "ttm_object.h" #include "vmwgfx_fence.h" +#include "vmwgfx_hashtab.h" #include "vmwgfx_reg.h" #include "vmwgfx_validation.h" @@ -54,9 +54,9 @@ #define VMWGFX_DRIVER_NAME "vmwgfx" -#define VMWGFX_DRIVER_DATE "20210722" +#define VMWGFX_DRIVER_DATE "20211206" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 19 +#define VMWGFX_DRIVER_MINOR 20 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 @@ -82,8 +82,9 @@ VMWGFX_NUM_GB_SURFACE +\ VMWGFX_NUM_GB_SCREEN_TARGET) -#define VMW_PL_GMR (TTM_PL_PRIV + 0) -#define VMW_PL_MOB (TTM_PL_PRIV + 1) +#define VMW_PL_GMR (TTM_PL_PRIV + 0) +#define VMW_PL_MOB (TTM_PL_PRIV + 1) +#define VMW_PL_SYSTEM (TTM_PL_PRIV + 2) #define VMW_RES_CONTEXT ttm_driver_type0 #define VMW_RES_SURFACE ttm_driver_type1 @@ -133,7 +134,7 @@ struct vmw_buffer_object { */ struct vmw_validate_buffer { struct ttm_validate_buffer base; - struct drm_hash_item hash; + struct vmwgfx_hash_item hash; bool validate_as_mob; }; @@ -332,7 +333,6 @@ struct vmw_sg_table { struct page **pages; const dma_addr_t *addrs; struct sg_table *sgt; - unsigned long num_regions; unsigned long num_pages; }; @@ -360,6 +360,19 @@ struct vmw_piter { dma_addr_t (*dma_address)(struct vmw_piter *); }; + +struct vmw_ttm_tt { + struct ttm_tt dma_ttm; + struct vmw_private *dev_priv; + int gmr_id; + struct vmw_mob *mob; + int mem_type; + struct sg_table sgt; + struct vmw_sg_table vsgt; + bool mapped; + bool bound; +}; + /* * enum vmw_display_unit_type - Describes the display unit */ @@ -406,10 +419,11 @@ struct vmw_ctx_validation_info; * @ctx: The validation context */ struct vmw_sw_context{ - struct drm_open_hash res_ht; + struct vmwgfx_open_hash res_ht; bool res_ht_initialized; bool kernel; struct vmw_fpriv *fp; + struct drm_file *filp; uint32_t *cmd_bounce; uint32_t cmd_bounce_size; struct vmw_buffer_object *cur_query_bo; @@ -473,6 +487,7 @@ enum { * @VMW_SM_4: Context support upto SM4. * @VMW_SM_4_1: Context support upto SM4_1. * @VMW_SM_5: Context support up to SM5. + * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions. * @VMW_SM_MAX: Should be the last. 
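 *
 * The values are ordered so that a plain comparison tests for a capability
 * level, e.g. has_gl43_context() simply checks sm_type >= VMW_SM_5_1X.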
*/ enum vmw_sm_type { @@ -480,6 +495,7 @@ enum vmw_sm_type { VMW_SM_4, VMW_SM_4_1, VMW_SM_5, + VMW_SM_5_1X, VMW_SM_MAX }; @@ -627,9 +643,6 @@ struct vmw_private { struct vmw_cmdbuf_man *cman; DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); - /* Validation memory reservation */ - struct vmw_validation_mem vvm; - uint32 *devcaps; /* @@ -645,6 +658,11 @@ struct vmw_private { #endif }; +static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj) +{ + return container_of((gobj), struct vmw_buffer_object, base.base); +} + static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) { return container_of(res, struct vmw_surface, res); @@ -738,6 +756,24 @@ static inline bool has_sm5_context(const struct vmw_private *dev_priv) return (dev_priv->sm_type >= VMW_SM_5); } +/** + * has_gl43_context - Does the device support GL43 context. + * @dev_priv: Device private. + * + * Return: Bool value if device support SM5 context or not. + */ +static inline bool has_gl43_context(const struct vmw_private *dev_priv) +{ + return (dev_priv->sm_type >= VMW_SM_5_1X); +} + + +static inline u32 vmw_max_num_uavs(struct vmw_private *dev_priv) +{ + return (has_gl43_context(dev_priv) ? + SVGA3D_DX11_1_MAX_UAVIEWS : SVGA3D_MAX_UAVIEWS); +} + extern void vmw_svga_enable(struct vmw_private *dev_priv); extern void vmw_svga_disable(struct vmw_private *dev_priv); @@ -767,7 +803,7 @@ extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, bool no_backup); extern bool vmw_resource_needs_backup(const struct vmw_resource *res); extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, + struct drm_file *filp, uint32_t handle, struct vmw_surface **out_surf, struct vmw_buffer_object **out_buf); @@ -833,6 +869,7 @@ static inline void vmw_user_resource_noref_release(void) /** * Buffer object helper functions - vmwgfx_bo.c */ +extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo); extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv, struct vmw_buffer_object *bo, struct ttm_placement *placement, @@ -857,32 +894,23 @@ extern int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, struct ttm_placement *placement, struct ttm_buffer_object **p_bo); +extern int vmw_bo_create(struct vmw_private *dev_priv, + size_t size, struct ttm_placement *placement, + bool interruptible, bool pin, + void (*bo_free)(struct ttm_buffer_object *bo), + struct vmw_buffer_object **p_bo); extern int vmw_bo_init(struct vmw_private *dev_priv, struct vmw_buffer_object *vmw_bo, size_t size, struct ttm_placement *placement, bool interruptible, bool pin, void (*bo_free)(struct ttm_buffer_object *bo)); -extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, - struct ttm_object_file *tfile); -extern int vmw_user_bo_alloc(struct vmw_private *dev_priv, - struct ttm_object_file *tfile, - uint32_t size, - bool shareable, - uint32_t *handle, - struct vmw_buffer_object **p_dma_buf, - struct ttm_base_object **p_base); -extern int vmw_user_bo_reference(struct ttm_object_file *tfile, - struct vmw_buffer_object *dma_buf, - uint32_t *handle); -extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int vmw_user_bo_lookup(struct ttm_object_file *tfile, - uint32_t id, struct vmw_buffer_object 
**out, - struct ttm_base_object **base); +extern int vmw_user_bo_lookup(struct drm_file *filp, + uint32_t handle, + struct vmw_buffer_object **out); extern void vmw_bo_fence_single(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence); extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo); @@ -891,16 +919,7 @@ extern void vmw_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem); extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); extern struct vmw_buffer_object * -vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle); - -/** - * vmw_user_bo_noref_release - release a buffer object pointer looked up - * without reference - */ -static inline void vmw_user_bo_noref_release(void) -{ - ttm_base_object_noref_release(); -} +vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle); /** * vmw_bo_adjust_prio - Adjust the buffer object eviction priority @@ -952,6 +971,19 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio) } /** + * GEM related functionality - vmwgfx_gem.c + */ +extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, + struct drm_file *filp, + uint32_t size, + uint32_t *handle, + struct vmw_buffer_object **p_vbo); +extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +extern void vmw_gem_destroy(struct ttm_buffer_object *bo); +extern void vmw_debugfs_gem_init(struct vmw_private *vdev); + +/** * Misc Ioctl functionality - vmwgfx_ioctl.c */ @@ -1027,9 +1059,6 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); -extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, - size_t gran); - /** * TTM buffer object driver - vmwgfx_ttm_buffer.c */ @@ -1039,7 +1068,6 @@ extern struct ttm_placement vmw_vram_placement; extern struct ttm_placement vmw_vram_sys_placement; extern struct ttm_placement vmw_vram_gmr_placement; extern struct ttm_placement vmw_sys_placement; -extern struct ttm_placement vmw_evictable_placement; extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_mob_placement; extern struct ttm_placement vmw_nonfixed_placement; @@ -1218,13 +1246,6 @@ void vmw_kms_lost_device(struct drm_device *dev); int vmw_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); - -int vmw_dumb_map_offset(struct drm_file *file_priv, - struct drm_device *dev, uint32_t handle, - uint64_t *offset); -int vmw_dumb_destroy(struct drm_file *file_priv, - struct drm_device *dev, - uint32_t handle); extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); extern void vmw_resource_unpin(struct vmw_resource *res); extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); @@ -1252,6 +1273,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type); void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type); /** + * System memory manager + */ +int vmw_sys_man_init(struct vmw_private *dev_priv); +void vmw_sys_man_fini(struct vmw_private *dev_priv); + +/** * Prime - vmwgfx_prime.c */ @@ -1322,18 +1349,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int vmw_surface_gb_priv_define(struct drm_device *dev, - uint32_t user_accounting_size, - SVGA3dSurfaceAllFlags svga3d_flags, - SVGA3dSurfaceFormat 
format, - bool for_scanout, - uint32_t num_mip_levels, - uint32_t multisample_count, - uint32_t array_size, - struct drm_vmw_size size, - SVGA3dMSPattern multisample_pattern, - SVGA3dMSQualityLevel quality_level, - struct vmw_surface **srf_out); extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1342,7 +1357,6 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, struct drm_file *file_priv); int vmw_gb_surface_define(struct vmw_private *dev_priv, - uint32_t user_accounting_size, const struct vmw_surface_metadata *req, struct vmw_surface **srf_out); @@ -1403,7 +1417,6 @@ void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv, extern struct vmw_cmdbuf_res_manager * vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv); extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man); -extern size_t vmw_cmdbuf_res_man_size(void); extern struct vmw_resource * vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man, enum vmw_cmdbuf_res_type res_type, @@ -1600,11 +1613,6 @@ vmw_bo_reference(struct vmw_buffer_object *buf) return buf; } -static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) -{ - return &ttm_mem_glob; -} - static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) { atomic_inc(&dev_priv->num_fifo_resources); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 5f2ffa9de5c8..44ca23b0ea4e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1171,14 +1171,13 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, int ret; vmw_validation_preload_bo(sw_context->ctx); - vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); - if (IS_ERR(vmw_bo)) { + vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); + if (IS_ERR_OR_NULL(vmw_bo)) { VMW_DEBUG_USER("Could not find or use MOB buffer.\n"); return PTR_ERR(vmw_bo); } - ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); - vmw_user_bo_noref_release(); + ttm_bo_put(&vmw_bo->base); if (unlikely(ret != 0)) return ret; @@ -1226,14 +1225,13 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, int ret; vmw_validation_preload_bo(sw_context->ctx); - vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); - if (IS_ERR(vmw_bo)) { + vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); + if (IS_ERR_OR_NULL(vmw_bo)) { VMW_DEBUG_USER("Could not find or use GMR region.\n"); return PTR_ERR(vmw_bo); } - ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); - vmw_user_bo_noref_release(); + ttm_bo_put(&vmw_bo->base); if (unlikely(ret != 0)) return ret; @@ -2166,6 +2164,44 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, } /** + * vmw_cmd_dx_set_constant_buffer_offset - Validate + * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command. + * + * @dev_priv: Pointer to a device private struct. + * @sw_context: The software context being used for this batch. + * @header: Pointer to the command header in the command stream. 
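+ *
+ * The shader stage is recovered from the command id itself, since the
+ * VS/PS/GS/HS/DS/CS variants occupy consecutive command ids:
+ * shader_slot = id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET.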
+ */ +static int +vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv, + struct vmw_sw_context *sw_context, + SVGA3dCmdHeader *header) +{ + VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset); + + struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); + u32 shader_slot; + + if (!has_sm5_context(dev_priv)) + return -EINVAL; + + if (!ctx_node) + return -EINVAL; + + cmd = container_of(header, typeof(*cmd), header); + if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { + VMW_DEBUG_USER("Illegal const buffer slot %u.\n", + (unsigned int) cmd->body.slot); + return -EINVAL; + } + + shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET; + vmw_binding_cb_offset_update(ctx_node->staged, shader_slot, + cmd->body.slot, cmd->body.offsetInBytes); + + return 0; +} + +/** * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES * command * @@ -2918,7 +2954,7 @@ static int vmw_cmd_set_uav(struct vmw_private *dev_priv, if (!has_sm5_context(dev_priv)) return -EINVAL; - if (num_uav > SVGA3D_MAX_UAVIEWS) { + if (num_uav > vmw_max_num_uavs(dev_priv)) { VMW_DEBUG_USER("Invalid UAV binding.\n"); return -EINVAL; } @@ -2950,7 +2986,7 @@ static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv, if (!has_sm5_context(dev_priv)) return -EINVAL; - if (num_uav > SVGA3D_MAX_UAVIEWS) { + if (num_uav > vmw_max_num_uavs(dev_priv)) { VMW_DEBUG_USER("Invalid UAV binding.\n"); return -EINVAL; } @@ -3528,6 +3564,24 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER, &vmw_cmd_dx_transfer_from_buffer, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET, + &vmw_cmd_dx_set_constant_buffer_offset, + true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy, true, false, true), @@ -3561,6 +3615,8 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { &vmw_cmd_dx_define_streamoutput, true, false, true), VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT, &vmw_cmd_dx_bind_streamoutput, true, false, true), + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2, + &vmw_cmd_dx_so_define, true, false, true), }; bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) @@ -3869,8 +3925,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, fence_rep.fd = -1; } - ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, - TTM_REF_USAGE); + ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle); VMW_DEBUG_USER("Fence copy error. 
Syncing.\n"); (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); @@ -4054,8 +4109,6 @@ int vmw_execbuf_process(struct drm_file *file_priv, struct sync_file *sync_file = NULL; DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1); - vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm); - if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) { out_fence_fd = get_unused_fd_flags(O_CLOEXEC); if (out_fence_fd < 0) { @@ -4101,6 +4154,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, sw_context->kernel = true; } + sw_context->filp = file_priv; sw_context->fp = vmw_fpriv(file_priv); INIT_LIST_HEAD(&sw_context->ctx_list); sw_context->cur_query_bo = dev_priv->pinned_bo; @@ -4117,7 +4171,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, vmw_binding_state_reset(sw_context->staged_bindings); if (!sw_context->res_ht_initialized) { - ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); + ret = vmwgfx_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); if (unlikely(ret != 0)) goto out_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index d18c6a56e3dc..8ee34576c7d0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -394,22 +394,15 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, struct vmw_buffer_object *vmw_bo; int ret; - vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); - if (!vmw_bo) { - ret = -ENOMEM; - goto err_unlock; - } - - ret = vmw_bo_init(vmw_priv, vmw_bo, size, + ret = vmw_bo_create(vmw_priv, size, &vmw_sys_placement, false, false, - &vmw_bo_bo_free); + &vmw_bo_bo_free, &vmw_bo); if (unlikely(ret != 0)) - goto err_unlock; /* init frees the buffer on failure */ + return ret; *out = vmw_bo; -err_unlock: return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 9fe12329a4d5..c60d395f9e2e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -37,9 +37,6 @@ struct vmw_fence_manager { spinlock_t lock; struct list_head fence_list; struct work_struct work; - u32 user_fence_size; - u32 fence_size; - u32 event_fence_action_size; bool fifo_down; struct list_head cleanup_list; uint32_t pending_actions[VMW_ACTION_MAX]; @@ -304,11 +301,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) INIT_LIST_HEAD(&fman->cleanup_list); INIT_WORK(&fman->work, &vmw_fence_work_func); fman->fifo_down = true; - fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) + - TTM_OBJ_EXTRA_SIZE; - fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); - fman->event_fence_action_size = - ttm_round_pot(sizeof(struct vmw_event_fence_action)); mutex_init(&fman->goal_irq_mutex); fman->ctx = dma_fence_context_alloc(1); @@ -560,14 +552,8 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence) { struct vmw_user_fence *ufence = container_of(fence, struct vmw_user_fence, fence); - struct vmw_fence_manager *fman = fman_from_fence(fence); ttm_base_object_kfree(ufence, base); - /* - * Free kernel space accounting. 
- */ - ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), - fman->user_fence_size); } static void vmw_user_fence_base_release(struct ttm_base_object **p_base) @@ -590,23 +576,8 @@ int vmw_user_fence_create(struct drm_file *file_priv, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_user_fence *ufence; struct vmw_fence_obj *tmp; - struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; int ret; - /* - * Kernel memory space accounting, since this object may - * be created by a user-space request. - */ - - ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size, - &ctx); - if (unlikely(ret != 0)) - return ret; - ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); if (unlikely(!ufence)) { ret = -ENOMEM; @@ -625,9 +596,10 @@ int vmw_user_fence_create(struct drm_file *file_priv, * vmw_user_fence_base_release. */ tmp = vmw_fence_obj_reference(&ufence->fence); + ret = ttm_base_object_init(tfile, &ufence->base, false, VMW_RES_FENCE, - &vmw_user_fence_base_release, NULL); + &vmw_user_fence_base_release); if (unlikely(ret != 0)) { @@ -646,7 +618,6 @@ out_err: tmp = &ufence->fence; vmw_fence_obj_unreference(&tmp); out_no_object: - ttm_mem_global_free(mem_glob, fman->user_fence_size); return ret; } @@ -831,8 +802,7 @@ out: */ if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) - return ttm_ref_object_base_unref(tfile, arg->handle, - TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->handle); return ret; } @@ -874,8 +844,7 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data, (struct drm_vmw_fence_arg *) data; return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - arg->handle, - TTM_REF_USAGE); + arg->handle); } /** @@ -1121,7 +1090,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, if (user_fence_rep != NULL) { ret = ttm_ref_object_add(vmw_fp->tfile, base, - TTM_REF_USAGE, NULL, false); + NULL, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed to reference a fence " "object.\n"); @@ -1164,7 +1133,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, return 0; out_no_create: if (user_fence_rep != NULL) - ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); + ttm_ref_object_base_unref(tfile, handle); out_no_ref_obj: vmw_fence_obj_unreference(&fence); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c new file mode 100644 index 000000000000..c016c434b6cb --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -0,0 +1,294 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2021 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include "vmwgfx_drv.h" + +#include "drm/drm_prime.h" +#include "drm/drm_gem_ttm_helper.h" + +/** + * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct + * vmw_buffer_object. + * + * @bo: Pointer to the TTM buffer object. + * Return: Pointer to the struct vmw_buffer_object embedding the + * TTM buffer object. + */ +static struct vmw_buffer_object * +vmw_buffer_object(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct vmw_buffer_object, base); +} + +static void vmw_gem_object_free(struct drm_gem_object *gobj) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); + if (bo) { + ttm_bo_put(bo); + } +} + +static int vmw_gem_object_open(struct drm_gem_object *obj, + struct drm_file *file_priv) +{ + return 0; +} + +static void vmw_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv) +{ +} + +static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); + struct vmw_buffer_object *vbo = vmw_buffer_object(bo); + int ret; + + ret = ttm_bo_reserve(bo, false, false, NULL); + if (unlikely(ret != 0)) + goto err; + + vmw_bo_pin_reserved(vbo, do_pin); + + ttm_bo_unreserve(bo); + +err: + return ret; +} + + +static int vmw_gem_object_pin(struct drm_gem_object *obj) +{ + return vmw_gem_pin_private(obj, true); +} + +static void vmw_gem_object_unpin(struct drm_gem_object *obj) +{ + vmw_gem_pin_private(obj, false); +} + +static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); + struct vmw_ttm_tt *vmw_tt = + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); + + if (vmw_tt->vsgt.sgt) + return vmw_tt->vsgt.sgt; + + return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages); +} + + +static const struct drm_gem_object_funcs vmw_gem_object_funcs = { + .free = vmw_gem_object_free, + .open = vmw_gem_object_open, + .close = vmw_gem_object_close, + .print_info = drm_gem_ttm_print_info, + .pin = vmw_gem_object_pin, + .unpin = vmw_gem_object_unpin, + .get_sg_table = vmw_gem_object_get_sg_table, + .vmap = drm_gem_ttm_vmap, + .vunmap = drm_gem_ttm_vunmap, + .mmap = drm_gem_ttm_mmap, +}; + +/** + * vmw_gem_destroy - vmw buffer object destructor + * + * @bo: Pointer to the embedded struct ttm_buffer_object + */ +void vmw_gem_destroy(struct ttm_buffer_object *bo) +{ + struct vmw_buffer_object *vbo = vmw_buffer_object(bo); + + WARN_ON(vbo->dirty); + WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); + vmw_bo_unmap(vbo); + drm_gem_object_release(&vbo->base.base); + kfree(vbo); +} + +int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, + struct drm_file *filp, + uint32_t size, + uint32_t *handle, + struct vmw_buffer_object **p_vbo) +{ + int ret; + + ret = vmw_bo_create(dev_priv, size, + (dev_priv->has_mob) ? 
+					  &vmw_sys_placement :
+					  &vmw_vram_sys_placement,
+			    true, false, &vmw_gem_destroy, p_vbo);
+
+	if (ret != 0)
+		goto out_no_bo;
+
+	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+	ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put(&(*p_vbo)->base.base);
+out_no_bo:
+	return ret;
+}
+
+
+int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	union drm_vmw_alloc_dmabuf_arg *arg =
+	    (union drm_vmw_alloc_dmabuf_arg *)data;
+	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+	struct vmw_buffer_object *vbo;
+	uint32_t handle;
+	int ret;
+
+	ret = vmw_gem_object_create_with_handle(dev_priv, filp,
+						req->size, &handle, &vbo);
+	if (ret)
+		goto out_no_bo;
+
+	rep->handle = handle;
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+	rep->cur_gmr_id = handle;
+	rep->cur_gmr_offset = 0;
+out_no_bo:
+	return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
+{
+	const char *placement;
+	const char *type;
+
+	switch (bo->base.resource->mem_type) {
+	case TTM_PL_SYSTEM:
+		placement = " CPU";
+		break;
+	case VMW_PL_GMR:
+		placement = " GMR";
+		break;
+	case VMW_PL_MOB:
+		placement = " MOB";
+		break;
+	case VMW_PL_SYSTEM:
+		placement = "VCPU";
+		break;
+	case TTM_PL_VRAM:
+		placement = "VRAM";
+		break;
+	default:
+		placement = "None";
+		break;
+	}
+
+	switch (bo->base.type) {
+	case ttm_bo_type_device:
+		type = "device";
+		break;
+	case ttm_bo_type_kernel:
+		type = "kernel";
+		break;
+	case ttm_bo_type_sg:
+		type = "sg ";
+		break;
+	default:
+		type = "none ";
+		break;
+	}
+
+	seq_printf(m, "\t\t0x%08x: %12ld bytes %s, type = %s",
+		   id, bo->base.base.size, placement, type);
+	seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
+		   bo->base.priority,
+		   bo->base.pin_count,
+		   kref_read(&bo->base.base.refcount),
+		   kref_read(&bo->base.kref));
+	seq_puts(m, "\n");
+}
+
+static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
+{
+	struct vmw_private *vdev = (struct vmw_private *)m->private;
+	struct drm_device *dev = &vdev->drm;
+	struct drm_file *file;
+	int r;
+
+	r = mutex_lock_interruptible(&dev->filelist_mutex);
+	if (r)
+		return r;
+
+	list_for_each_entry(file, &dev->filelist, lhead) {
+		struct task_struct *task;
+		struct drm_gem_object *gobj;
+		int id;
+
+		/*
+		 * Although we have a valid reference on file->pid, that does
+		 * not guarantee that the task_struct who called get_pid() is
+		 * still alive (e.g. get_pid(current) => fork() => exit()).
+		 * Therefore, we need to protect this ->comm access using RCU.
+		 */
+		rcu_read_lock();
+		task = pid_task(file->pid, PIDTYPE_PID);
+		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+			   task ?
task->comm : "<unknown>"); + rcu_read_unlock(); + + spin_lock(&file->table_lock); + idr_for_each_entry(&file->object_idr, gobj, id) { + struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj); + + vmw_bo_print_info(id, bo, m); + } + spin_unlock(&file->table_lock); + } + + mutex_unlock(&dev->filelist_mutex); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info); + +#endif + +void vmw_debugfs_gem_init(struct vmw_private *vdev) +{ +#if defined(CONFIG_DEBUG_FS) + struct drm_minor *minor = vdev->drm.primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev, + &vmw_debugfs_gem_info_fops); +#endif +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b2c4af331c9d..ebb4505a31a3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -42,6 +42,7 @@ struct vmwgfx_gmrid_man { uint32_t max_gmr_ids; uint32_t max_gmr_pages; uint32_t used_gmr_pages; + uint8_t type; }; static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man) @@ -132,6 +133,18 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, kfree(res); } +static void vmw_gmrid_man_debug(struct ttm_resource_manager *man, + struct drm_printer *printer) +{ + struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man); + + BUG_ON(gman->type != VMW_PL_GMR && gman->type != VMW_PL_MOB); + + drm_printf(printer, "%s's used: %u pages, max: %u pages, %u id's\n", + (gman->type == VMW_PL_MOB) ? "Mob" : "GMR", + gman->used_gmr_pages, gman->max_gmr_pages, gman->max_gmr_ids); +} + static const struct ttm_resource_manager_func vmw_gmrid_manager_func; int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) @@ -146,12 +159,12 @@ int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type) man = &gman->manager; man->func = &vmw_gmrid_manager_func; - /* TODO: This is most likely not correct */ man->use_tt = true; ttm_resource_manager_init(man, 0); spin_lock_init(&gman->lock); gman->used_gmr_pages = 0; ida_init(&gman->gmr_ida); + gman->type = type; switch (type) { case VMW_PL_GMR: @@ -190,4 +203,5 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) static const struct ttm_resource_manager_func vmw_gmrid_manager_func = { .alloc = vmw_gmrid_man_get_node, .free = vmw_gmrid_man_put_node, + .debug = vmw_gmrid_man_debug }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c new file mode 100644 index 000000000000..06aebc12774e --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.c @@ -0,0 +1,199 @@ +/* + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Simple open hash tab implementation. + * + * Authors: + * Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#include <linux/export.h> +#include <linux/hash.h> +#include <linux/mm.h> +#include <linux/rculist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include <drm/drm_print.h> + +#include "vmwgfx_hashtab.h" + +int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order) +{ + unsigned int size = 1 << order; + + ht->order = order; + ht->table = NULL; + if (size <= PAGE_SIZE / sizeof(*ht->table)) + ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL); + else + ht->table = vzalloc(array_size(size, sizeof(*ht->table))); + if (!ht->table) { + DRM_ERROR("Out of memory for hash table\n"); + return -ENOMEM; + } + return 0; +} + +void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + int count = 0; + + hashed_key = hash_long(key, ht->order); + DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry(entry, h_list, head) + DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); +} + +static struct hlist_node *vmwgfx_ht_find_key(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry(entry, h_list, head) { + if (entry->key == key) + return &entry->head; + if (entry->key > key) + break; + } + return NULL; +} + +static struct hlist_node *vmwgfx_ht_find_key_rcu(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + unsigned int hashed_key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + hlist_for_each_entry_rcu(entry, h_list, head) { + if (entry->key == key) + return &entry->head; + if (entry->key > key) + break; + } + return NULL; +} + +int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) +{ + struct vmwgfx_hash_item *entry; + struct hlist_head *h_list; + struct hlist_node *parent; + unsigned int hashed_key; + unsigned long key = item->key; + + hashed_key = hash_long(key, ht->order); + h_list = &ht->table[hashed_key]; + parent = NULL; + hlist_for_each_entry(entry, h_list, head) { + if (entry->key == key) + return -EINVAL; + if (entry->key > key) + break; + parent = &entry->head; + } + if (parent) + hlist_add_behind_rcu(&item->head, parent); + else + hlist_add_head_rcu(&item->head, h_list); + return 0; +} + +/* + * Just insert an item and return any "bits" bit key that hasn't been + * used before. 
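+ *
+ * A sketch of the probing, with illustrative values not taken from any
+ * caller: the walk starts at hash_long(seed, bits) and steps the low
+ * bits by one modulo 1 << bits until the insert succeeds or it wraps
+ * around to the first candidate. With bits = 2, shift = 0 and add = 0
+ * only four distinct keys exist, so a fifth insertion always fails
+ * with -EINVAL.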
+ */ +int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add) +{ + int ret; + unsigned long mask = (1UL << bits) - 1; + unsigned long first, unshifted_key; + + unshifted_key = hash_long(seed, bits); + first = unshifted_key; + do { + item->key = (unshifted_key << shift) + add; + ret = vmwgfx_ht_insert_item(ht, item); + if (ret) + unshifted_key = (unshifted_key + 1) & mask; + } while (ret && (unshifted_key != first)); + + if (ret) { + DRM_ERROR("Available key bit space exhausted\n"); + return -EINVAL; + } + return 0; +} + +int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, + struct vmwgfx_hash_item **item) +{ + struct hlist_node *list; + + list = vmwgfx_ht_find_key_rcu(ht, key); + if (!list) + return -EINVAL; + + *item = hlist_entry(list, struct vmwgfx_hash_item, head); + return 0; +} + +int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key) +{ + struct hlist_node *list; + + list = vmwgfx_ht_find_key(ht, key); + if (list) { + hlist_del_init_rcu(list); + return 0; + } + return -EINVAL; +} + +int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item) +{ + hlist_del_init_rcu(&item->head); + return 0; +} + +void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht) +{ + if (ht->table) { + kvfree(ht->table); + ht->table = NULL; + } +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h new file mode 100644 index 000000000000..a9ce12922e21 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_hashtab.h @@ -0,0 +1,83 @@ +/* + * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +/* + * Simple open hash tab implementation. + * + * Authors: + * Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +/* + * TODO: Replace this hashtable with Linux' generic implementation + * from <linux/hashtable.h>. 
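+ *
+ * A rough sketch of that conversion, assuming callers can do without
+ * buckets kept sorted by key: DECLARE_HASHTABLE() would stand in for
+ * vmwgfx_ht_create(), hash_add()/hash_del() for
+ * vmwgfx_ht_insert_item()/vmwgfx_ht_remove_item(), and a
+ * hash_for_each_possible() walk comparing ->key for
+ * vmwgfx_ht_find_item(). vmwgfx_ht_just_insert_please() would keep its
+ * probing loop, which has no generic counterpart.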
+ */ + +#ifndef VMWGFX_HASHTAB_H +#define VMWGFX_HASHTAB_H + +#include <linux/list.h> + +#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) + +struct vmwgfx_hash_item { + struct hlist_node head; + unsigned long key; +}; + +struct vmwgfx_open_hash { + struct hlist_head *table; + u8 order; +}; + +int vmwgfx_ht_create(struct vmwgfx_open_hash *ht, unsigned int order); +int vmwgfx_ht_insert_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); +int vmwgfx_ht_just_insert_please(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item, + unsigned long seed, int bits, int shift, + unsigned long add); +int vmwgfx_ht_find_item(struct vmwgfx_open_hash *ht, unsigned long key, + struct vmwgfx_hash_item **item); + +void vmwgfx_ht_verbose_list(struct vmwgfx_open_hash *ht, unsigned long key); +int vmwgfx_ht_remove_key(struct vmwgfx_open_hash *ht, unsigned long key); +int vmwgfx_ht_remove_item(struct vmwgfx_open_hash *ht, struct vmwgfx_hash_item *item); +void vmwgfx_ht_remove(struct vmwgfx_open_hash *ht); + +/* + * RCU-safe interface + * + * The user of this API needs to make sure that two or more instances of the + * hash table manipulation functions are never run simultaneously. + * The lookup function vmwgfx_ht_find_item_rcu may, however, run simultaneously + * with any of the manipulation functions as long as it's called from within + * an RCU read-locked section. + */ +#define vmwgfx_ht_insert_item_rcu vmwgfx_ht_insert_item +#define vmwgfx_ht_just_insert_please_rcu vmwgfx_ht_just_insert_please +#define vmwgfx_ht_remove_key_rcu vmwgfx_ht_remove_key +#define vmwgfx_ht_remove_item_rcu vmwgfx_ht_remove_item +#define vmwgfx_ht_find_item_rcu vmwgfx_ht_find_item + +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 28af34ab6ed6..471da2b4c177 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -105,6 +105,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, case DRM_VMW_PARAM_SM5: param->value = has_sm5_context(dev_priv); break; + case DRM_VMW_PARAM_GL43: + param->value = has_gl43_context(dev_priv); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 74fa41909213..4e693e8de2c3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -843,8 +843,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) drm_framebuffer_cleanup(framebuffer); vmw_surface_unreference(&vfbs->surface); - if (vfbs->base.user_obj) - ttm_base_object_unref(&vfbs->base.user_obj); kfree(vfbs); } @@ -989,6 +987,16 @@ out_err1: * Buffer-object framebuffer code */ +static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int *handle) +{ + struct vmw_framebuffer_bo *vfbd = + vmw_framebuffer_to_vfbd(fb); + + return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle); +} + static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_bo *vfbd = @@ -996,8 +1004,6 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) drm_framebuffer_cleanup(framebuffer); vmw_bo_unreference(&vfbd->buffer); - if (vfbd->base.user_obj) - ttm_base_object_unref(&vfbd->base.user_obj); kfree(vfbd); } @@ -1063,6 +1069,7 @@ static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer, } static const struct drm_framebuffer_funcs 
vmw_framebuffer_bo_funcs = { + .create_handle = vmw_framebuffer_bo_create_handle, .destroy = vmw_framebuffer_bo_destroy, .dirty = vmw_framebuffer_bo_dirty_ext, }; @@ -1188,7 +1195,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev, metadata.base_size.depth = 1; metadata.scanout = true; - ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out); + ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out); if (ret) { DRM_ERROR("Failed to allocate proxy content buffer\n"); return ret; @@ -1251,6 +1258,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, goto out_err1; } + vfbd->base.base.obj[0] = &bo->base.base; drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); vfbd->base.bo = true; vfbd->buffer = vmw_bo_reference(bo); @@ -1368,34 +1376,13 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vmw_private *dev_priv = vmw_priv(dev); - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_framebuffer *vfb = NULL; struct vmw_surface *surface = NULL; struct vmw_buffer_object *bo = NULL; - struct ttm_base_object *user_obj; int ret; - /* - * Take a reference on the user object of the resource - * backing the kms fb. This ensures that user-space handle - * lookups on that resource will always work as long as - * it's registered with a kms framebuffer. This is important, - * since vmw_execbuf_process identifies resources in the - * command stream using user-space handles. - */ - - user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]); - if (unlikely(user_obj == NULL)) { - DRM_ERROR("Could not locate requested kms frame buffer.\n"); - return ERR_PTR(-ENOENT); - } - - /** - * End conditioned code. - */ - /* returns either a bo or surface */ - ret = vmw_user_lookup_handle(dev_priv, tfile, + ret = vmw_user_lookup_handle(dev_priv, file_priv, mode_cmd->handles[0], &surface, &bo); if (ret) @@ -1428,10 +1415,8 @@ err_out: if (ret) { DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); - ttm_base_object_unref(&user_obj); return ERR_PTR(ret); - } else - vfb->user_obj = user_obj; + } return &vfb->base; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index bbc809f7bd8a..4d36e8507380 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -219,7 +219,6 @@ struct vmw_framebuffer { int (*pin)(struct vmw_framebuffer *fb); int (*unpin)(struct vmw_framebuffer *fb); bool bo; - struct ttm_base_object *user_obj; uint32_t user_handle; }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index f9394207dd3c..2d91a44a3b22 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -146,9 +146,6 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, if (otable->size <= PAGE_SIZE) { mob->pt_level = VMW_MOBFMT_PTDEPTH_0; mob->pt_root_page = vmw_piter_dma_addr(&iter); - } else if (vsgt->num_regions == 1) { - mob->pt_level = SVGA3D_MOBFMT_RANGE; - mob->pt_root_page = vmw_piter_dma_addr(&iter); } else { ret = vmw_mob_pt_populate(dev_priv, mob); if (unlikely(ret != 0)) @@ -413,10 +410,9 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) * @mob: Pointer to the mob the pagetable of which we want to * populate. * - * This function allocates memory to be used for the pagetable, and - * adjusts TTM memory accounting accordingly. 
Returns ENOMEM if
- * memory resources aren't sufficient and may cause TTM buffer objects
- * to be swapped out by using the TTM memory accounting function.
+ * This function allocates memory to be used for the pagetable.
+ * Returns ENOMEM if memory resources aren't sufficient and may
+ * cause TTM buffer objects to be swapped out.
 */
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
@@ -624,9 +620,6 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	if (likely(num_data_pages == 1)) {
 		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
 		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
-	} else if (vsgt->num_regions == 1) {
-		mob->pt_level = SVGA3D_MOBFMT_RANGE;
-		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
 	} else if (unlikely(mob->pt_bo == NULL)) {
 		ret = vmw_mob_pt_populate(dev_priv, mob);
 		if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e50fb82a3030..2aceac7856e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,7 +28,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
 #include <asm/hypervisor.h>
 #include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
 	unsigned long msg_len = strlen(msg);
 	/* HB port can't access encrypted memory. */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_high;
 		u32 channel_id = (channel->channel_id << 16);
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
 	unsigned long si, di, eax, ebx, ecx, edx;
 	/* HB port can't access encrypted memory */
-	if (hb && !mem_encrypt_active()) {
+	if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		unsigned long bp = channel->cookie_low;
 		u32 channel_id = (channel->channel_id << 16);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 54c5d16eb3b7..e9f5c89b4ca6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}
-	ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
+	ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf);
 	if (ret)
 		goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 922317d1acc8..7bc99b1279f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -57,7 +57,6 @@ enum vmw_bo_dirty_method {
 * @ref_count: Reference count for this structure
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
- * @size: The accounting size for this struct.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
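 *
 * Sizing sketch, with illustrative numbers: one bit is kept per page,
 * so a 1 MiB buffer object backed by 4 KiB pages has num_pages = 256
 * and needs BITS_TO_LONGS(256) = 4 longs (32 bytes on 64-bit),
 * allocated together with this struct by kvzalloc() in
 * vmw_bo_dirty_add().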
 */
@@ -68,7 +67,6 @@ struct vmw_bo_dirty {
 	unsigned int change_count;
 	unsigned int ref_count;
 	unsigned long bitmap_size;
-	size_t size;
 	unsigned long bitmap[];
 };
@@ -233,12 +231,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
 	struct vmw_bo_dirty *dirty = vbo->dirty;
 	pgoff_t num_pages = vbo->base.resource->num_pages;
-	size_t size, acc_size;
+	size_t size;
 	int ret;
-	static struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
 	if (dirty) {
 		dirty->ref_count++;
@@ -246,20 +240,12 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 	}
 	size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
-	acc_size = ttm_round_pot(size);
-	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
-	if (ret) {
-		VMW_DEBUG_USER("Out of graphics memory for buffer object "
-			       "dirty tracker.\n");
-		return ret;
-	}
 	dirty = kvzalloc(size, GFP_KERNEL);
 	if (!dirty) {
 		ret = -ENOMEM;
 		goto out_no_dirty;
 	}
-	dirty->size = acc_size;
 	dirty->bitmap_size = num_pages;
 	dirty->start = dirty->bitmap_size;
 	dirty->end = 0;
@@ -285,7 +271,6 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 	return 0;
 out_no_dirty:
-	ttm_mem_global_free(&ttm_mem_glob, acc_size);
 	return ret;
 }
@@ -304,10 +289,7 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
 	struct vmw_bo_dirty *dirty = vbo->dirty;
 	if (dirty && --dirty->ref_count == 0) {
-		size_t acc_size = dirty->size;
-
 		kvfree(dirty);
-		ttm_mem_global_free(&ttm_mem_glob, acc_size);
 		vbo->dirty = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index d9552a1efd13..2d72a5ee7c0c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -85,6 +85,5 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
			   int *prime_fd)
 {
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
 	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8d1e869cc196..708899ba2102 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -320,11 +320,12 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
 * The pointers pointed at by out_surf and out_buf need to be NULL.
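 *
 * As a sketch of the flow below: the handle is first tried as a
 * surface lookup, and only if that fails is it tried as a buffer
 * object via vmw_user_bo_lookup(), so at most one of the two out
 * pointers is set on success.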
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-			   struct ttm_object_file *tfile,
+			   struct drm_file *filp,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
 {
+	struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
 	struct vmw_resource *res;
 	int ret;
@@ -339,7 +340,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	}
 	*out_surf = NULL;
-	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
+	ret = vmw_user_bo_lookup(filp, handle, out_buf);
 	return ret;
 }
@@ -362,14 +363,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 		return 0;
 	}
-	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
-	if (unlikely(!backup))
-		return -ENOMEM;
-
-	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
-			  res->func->backup_placement,
-			  interruptible, false,
-			  &vmw_bo_bo_free);
+	ret = vmw_bo_create(res->dev_priv, res->backup_size,
+			    res->func->backup_placement,
+			    interruptible, false,
+			    &vmw_bo_bo_free, &backup);
 	if (unlikely(ret != 0))
 		goto out_no_bo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bd157fb21b45..3004c7a719e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -442,19 +442,15 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 		vps->bo_size = 0;
 	}
-	vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
-	if (!vps->bo)
-		return -ENOMEM;
-
 	vmw_svga_enable(dev_priv);
 	/* After we have allocated the backing store we might not be able to
	 * resume the overlays; this is preferred to failing to allocate.
	 */
 	vmw_overlay_pause_all(dev_priv);
-	ret = vmw_bo_init(dev_priv, vps->bo, size,
-			  &vmw_vram_placement,
-			  false, true, &vmw_bo_bo_free);
+	ret = vmw_bo_create(dev_priv, size,
+			    &vmw_vram_placement,
+			    false, true, &vmw_bo_bo_free, &vps->bo);
 	vmw_overlay_resume_all(dev_priv);
 	if (ret) {
 		vps->bo = NULL; /* vmw_bo_create frees on error */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index b8dd62529104..108a496b5d18 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -53,10 +53,6 @@ struct vmw_dx_shader {
 	struct list_head cotable_head;
 };
-static uint64_t vmw_user_shader_size;
-static uint64_t vmw_shader_size;
-static size_t vmw_shader_dx_size;
-
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -79,7 +75,6 @@ static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
					enum vmw_cmdbuf_res_state state);
 static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
 static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
-static uint64_t vmw_user_shader_size;
 static const struct vmw_user_resource_conv user_shader_conv = {
 	.object_type = VMW_RES_SHADER,
@@ -563,16 +558,14 @@ void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
 *
 * @res: The shader resource
 *
- * Frees the DX shader resource and updates memory accounting.
+ * Frees the DX shader resource.
*/ static void vmw_dx_shader_res_free(struct vmw_resource *res) { - struct vmw_private *dev_priv = res->dev_priv; struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); vmw_resource_unreference(&shader->cotable); kfree(shader); - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); } /** @@ -594,30 +587,13 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, struct vmw_dx_shader *shader; struct vmw_resource *res; struct vmw_private *dev_priv = ctx->dev_priv; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (!vmw_shader_dx_size) - vmw_shader_dx_size = ttm_round_pot(sizeof(*shader)); - if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size, - &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - return ret; - } - shader = kmalloc(sizeof(*shader), GFP_KERNEL); if (!shader) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); return -ENOMEM; } @@ -669,21 +645,15 @@ static void vmw_user_shader_free(struct vmw_resource *res) { struct vmw_user_shader *ushader = container_of(res, struct vmw_user_shader, shader.res); - struct vmw_private *dev_priv = res->dev_priv; ttm_base_object_kfree(ushader, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); } static void vmw_shader_free(struct vmw_resource *res) { struct vmw_shader *shader = vmw_res_to_shader(res); - struct vmw_private *dev_priv = res->dev_priv; kfree(shader); - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_shader_size); } /* @@ -706,8 +676,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - return ttm_ref_object_base_unref(tfile, arg->handle, - TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->handle); } static int vmw_user_shader_alloc(struct vmw_private *dev_priv, @@ -722,31 +691,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, { struct vmw_user_shader *ushader; struct vmw_resource *res, *tmp; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (unlikely(vmw_user_shader_size == 0)) - vmw_user_shader_size = - ttm_round_pot(sizeof(struct vmw_user_shader)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_user_shader_size, - &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - goto out; - } - ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); if (unlikely(!ushader)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_user_shader_size); ret = -ENOMEM; goto out; } @@ -769,7 +717,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, tmp = vmw_resource_reference(res); ret = ttm_base_object_init(tfile, &ushader->base, false, VMW_RES_SHADER, - &vmw_user_shader_base_release, NULL); + &vmw_user_shader_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -793,31 +741,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, { struct vmw_shader *shader; struct vmw_resource *res; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; - if (unlikely(vmw_shader_size == 0)) - vmw_shader_size = - ttm_round_pot(sizeof(struct 
vmw_shader)) + - VMW_IDA_ACC_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - vmw_shader_size, - &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for shader " - "creation.\n"); - goto out_err; - } - shader = kzalloc(sizeof(*shader), GFP_KERNEL); if (unlikely(!shader)) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - vmw_shader_size); ret = -ENOMEM; goto out_err; } @@ -849,8 +776,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, int ret; if (buffer_handle != SVGA3D_INVALID_ID) { - ret = vmw_user_bo_lookup(tfile, buffer_handle, - &buffer, NULL); + ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer); if (unlikely(ret != 0)) { VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n"); return ret; @@ -966,13 +892,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, if (!vmw_shader_id_ok(user_key, shader_type)) return -EINVAL; - /* Allocate and pin a DMA buffer */ - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (unlikely(!buf)) - return -ENOMEM; - - ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement, - true, true, vmw_bo_bo_free); + ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement, + true, true, vmw_bo_bo_free, &buf); if (unlikely(ret != 0)) goto out; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c index 33b69a70cfe3..483ad544ea54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c @@ -32,12 +32,10 @@ * struct vmw_user_simple_resource - User-space simple resource struct * * @base: The TTM base object implementing user-space visibility. - * @account_size: How much memory was accounted for this object. * @simple: The embedded struct vmw_simple_resource. */ struct vmw_user_simple_resource { struct ttm_base_object base; - size_t account_size; struct vmw_simple_resource simple; /* * Nothing to be placed after @simple, since size of @simple is @@ -91,18 +89,15 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv, * * @res: The struct vmw_resource member of the simple resource object. * - * Frees memory and memory accounting for the object. + * Frees memory for the object. 
*/ static void vmw_simple_resource_free(struct vmw_resource *res) { struct vmw_user_simple_resource *usimple = container_of(res, struct vmw_user_simple_resource, simple.res); - struct vmw_private *dev_priv = res->dev_priv; - size_t size = usimple->account_size; ttm_base_object_kfree(usimple, base); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -149,39 +144,19 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, struct vmw_resource *res; struct vmw_resource *tmp; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; size_t alloc_size; - size_t account_size; int ret; alloc_size = offsetof(struct vmw_user_simple_resource, simple) + func->size; - account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE + - TTM_OBJ_EXTRA_SIZE; - - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size, - &ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for %s" - " creation.\n", func->res_func.type_name); - - goto out_ret; - } usimple = kzalloc(alloc_size, GFP_KERNEL); if (!usimple) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), - account_size); ret = -ENOMEM; goto out_ret; } usimple->simple.func = func; - usimple->account_size = account_size; res = &usimple->simple.res; usimple->base.shareable = false; usimple->base.tfile = NULL; @@ -197,7 +172,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data, tmp = vmw_resource_reference(res); ret = ttm_base_object_init(tfile, &usimple->base, false, func->ttm_res_type, - &vmw_simple_resource_base_release, NULL); + &vmw_simple_resource_base_release); if (ret) { vmw_resource_unreference(&tmp); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index 9efb4463ce99..4ea32b01efc0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -279,18 +279,15 @@ static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) * * @res: Pointer to a struct vmw_resource * - * Frees memory and memory accounting held by a struct vmw_view. + * Frees memory held by the struct vmw_view. 
*/ static void vmw_view_res_free(struct vmw_resource *res) { struct vmw_view *view = vmw_view(res); - size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size; - struct vmw_private *dev_priv = res->dev_priv; vmw_resource_unreference(&view->cotable); vmw_resource_unreference(&view->srf); kfree_rcu(view, rcu); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -327,10 +324,6 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, struct vmw_private *dev_priv = ctx->dev_priv; struct vmw_resource *res; struct vmw_view *view; - struct ttm_operation_ctx ttm_opt_ctx = { - .interruptible = true, - .no_wait_gpu = false - }; size_t size; int ret; @@ -347,16 +340,8 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man, size = offsetof(struct vmw_view, cmd) + cmd_size; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for view creation\n"); - return ret; - } - view = kmalloc(size, GFP_KERNEL); if (!view) { - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); return -ENOMEM; } @@ -582,4 +567,8 @@ static void vmw_so_build_asserts(void) offsetof(SVGA3dCmdDXDefineRenderTargetView, sid)); BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != offsetof(SVGA3dCmdDXDefineDepthStencilView, sid)); + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != + offsetof(SVGA3dCmdDXDefineUAView, sid)); + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != + offsetof(SVGA3dCmdDXDefineDepthStencilView_v2, sid)); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h index f48b84bfeeac..01c701e7466e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h @@ -93,7 +93,10 @@ static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id) id == SVGA_3D_CMD_DX_DESTROY_UA_VIEW) return vmw_view_ua; - if (tmp > (u32)vmw_view_max) + if (id == SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2) + return vmw_view_ds; + + if (tmp > (u32)vmw_view_ds) return vmw_view_max; return (enum vmw_view_type) tmp; @@ -123,6 +126,7 @@ static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id) case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE: return vmw_so_ds; case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE: + case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2: case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE: return vmw_so_rs; case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index d85310b2608d..8f0d651d0144 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1123,7 +1123,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, } if (!vps->surf) { - ret = vmw_gb_surface_define(dev_priv, 0, &metadata, + ret = vmw_gb_surface_define(dev_priv, &metadata, &vps->surf); if (ret != 0) { DRM_ERROR("Couldn't allocate STDU surface.\n"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c index c8efa4a6c995..2de97419d5c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c @@ -60,8 +60,6 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback, static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res, enum vmw_cmdbuf_res_state state); -static size_t vmw_streamoutput_size; - static const struct vmw_res_func vmw_dx_streamoutput_func = { .res_type = vmw_res_streamoutput, .needs_backup = true, @@ -254,12 +252,10 @@ 
vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
 static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
 {
-	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
 	vmw_resource_unreference(&so->cotable);
 	kfree(so);
-	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
 }
 static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
@@ -284,27 +280,10 @@ int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
 	struct vmw_dx_streamoutput *so;
 	struct vmw_resource *res;
 	struct vmw_private *dev_priv = ctx->dev_priv;
-	struct ttm_operation_ctx ttm_opt_ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
 	int ret;
-	if (!vmw_streamoutput_size)
-		vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
-
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_streamoutput_size, &ttm_opt_ctx);
-	if (ret) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for streamout.\n");
-		return ret;
-	}
-
 	so = kmalloc(sizeof(*so), GFP_KERNEL);
 	if (!so) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_streamoutput_size);
 		return -ENOMEM;
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 5d53a5f9d123..00e8e27e4884 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -45,16 +45,12 @@
 * @prime: The TTM prime object.
 * @base: The TTM base object handling user-space visibility.
 * @srf: The surface metadata.
- * @size: TTM accounting size for the surface.
 * @master: Master of the creating client. Used for security check.
- * @backup_base: The TTM base object of the backup buffer.
 */
 struct vmw_user_surface {
 	struct ttm_prime_object prime;
 	struct vmw_surface srf;
-	uint32_t size;
 	struct drm_master *master;
-	struct ttm_base_object *backup_base;
 };
 /**
@@ -74,13 +70,11 @@ struct vmw_surface_offset {
 /**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
- * @size: Accounting size for the struct vmw_surface_dirty.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
 struct vmw_surface_dirty {
 	struct vmw_surface_cache cache;
-	size_t size;
 	u32 num_subres;
 	SVGA3dBox boxes[];
 };
@@ -129,9 +123,6 @@ static const struct vmw_user_resource_conv user_surface_conv = {
 const struct vmw_user_resource_conv *user_surface_converter =
 	&user_surface_conv;
-
-static uint64_t vmw_user_surface_size;
-
 static const struct vmw_res_func vmw_legacy_surface_func = {
 	.res_type = vmw_res_surface,
 	.needs_backup = false,
@@ -359,7 +350,7 @@ static void vmw_surface_dma_encode(struct vmw_surface *srf,
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface if
- * any, and adjusts accounting and resource count accordingly.
+ * any, and adjusts resource count accordingly.
*/ static void vmw_hw_surface_destroy(struct vmw_resource *res) { @@ -666,8 +657,6 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_surface *srf = vmw_res_to_srf(res); struct vmw_user_surface *user_srf = container_of(srf, struct vmw_user_surface, srf); - struct vmw_private *dev_priv = srf->res.dev_priv; - uint32_t size = user_srf->size; WARN_ON_ONCE(res->dirty); if (user_srf->master) @@ -676,7 +665,6 @@ static void vmw_user_surface_free(struct vmw_resource *res) kfree(srf->metadata.sizes); kfree(srf->snooper.image); ttm_prime_object_kfree(user_srf, prime); - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); } /** @@ -696,8 +684,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; - if (user_srf->backup_base) - ttm_base_object_unref(&user_srf->backup_base); vmw_resource_unreference(&res); } @@ -715,7 +701,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); + return ttm_ref_object_base_unref(tfile, arg->sid); } /** @@ -740,23 +726,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, struct drm_vmw_surface_create_req *req = &arg->req; struct drm_vmw_surface_arg *rep = &arg->rep; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; int ret; int i, j; uint32_t cur_bo_offset; struct drm_vmw_size *cur_size; struct vmw_surface_offset *cur_offset; uint32_t num_sizes; - uint32_t size; const SVGA3dSurfaceDesc *desc; - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - num_sizes = 0; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) @@ -768,10 +745,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, num_sizes == 0) return -EINVAL; - size = vmw_user_surface_size + - ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) + - ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset)); - desc = vmw_surface_get_desc(req->format); if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) { VMW_DEBUG_USER("Invalid format %d for surface creation.\n", @@ -779,18 +752,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - size, &ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for surface.\n"); - goto out_unlock; - } - user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); if (unlikely(!user_srf)) { ret = -ENOMEM; - goto out_no_user_srf; + goto out_unlock; } srf = &user_srf->srf; @@ -805,7 +770,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, memcpy(metadata->mip_levels, req->mip_levels, sizeof(metadata->mip_levels)); metadata->num_sizes = num_sizes; - user_srf->size = size; metadata->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long) req->size_addr, @@ -883,22 +847,22 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, if (dev_priv->has_mob && req->shareable) { uint32_t backup_handle; - ret = vmw_user_bo_alloc(dev_priv, tfile, - res->backup_size, - true, - &backup_handle, - &res->backup, - &user_srf->backup_base); + ret = 
vmw_gem_object_create_with_handle(dev_priv, + file_priv, + res->backup_size, + &backup_handle, + &res->backup); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; } + vmw_bo_reference(res->backup); } tmp = vmw_resource_reference(&srf->res); ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, req->shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); + &vmw_user_surface_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -916,8 +880,6 @@ out_no_offsets: kfree(metadata->sizes); out_no_sizes: ttm_prime_object_kfree(user_srf, prime); -out_no_user_srf: - ttm_mem_global_free(vmw_mem_glob(dev_priv), size); out_unlock: return ret; } @@ -955,7 +917,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, VMW_DEBUG_USER("Referenced object is not a surface.\n"); goto out_bad_resource; } - if (handle_type != DRM_VMW_HANDLE_PRIME) { bool require_exist = false; @@ -980,8 +941,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, if (unlikely(drm_is_render_client(file_priv))) require_exist = true; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, - require_exist); + ret = ttm_ref_object_add(tfile, base, NULL, require_exist); if (unlikely(ret != 0)) { DRM_ERROR("Could not add a reference to a surface.\n"); goto out_bad_resource; @@ -995,7 +955,7 @@ out_bad_resource: ttm_base_object_unref(&base); out_no_lookup: if (handle_type == DRM_VMW_HANDLE_PRIME) - (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); + (void) ttm_ref_object_base_unref(tfile, handle); return ret; } @@ -1045,7 +1005,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) { VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes, srf->metadata.num_sizes); - ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE); + ttm_ref_object_base_unref(tfile, base->handle); ret = -EFAULT; } @@ -1459,7 +1419,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, struct vmw_resource *res; struct vmw_resource *tmp; int ret = 0; - uint32_t size; uint32_t backup_handle = 0; SVGA3dSurfaceAllFlags svga3d_flags_64 = SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits, @@ -1506,12 +1465,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, return -EINVAL; } - if (unlikely(vmw_user_surface_size == 0)) - vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + - VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE; - - size = vmw_user_surface_size; - metadata.flags = svga3d_flags_64; metadata.format = req->base.format; metadata.mip_levels[0] = req->base.mip_levels; @@ -1526,7 +1479,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, drm_vmw_surface_flag_scanout; /* Define a surface based on the parameters. 
*/ - ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf); + ret = vmw_gb_surface_define(dev_priv, &metadata, &srf); if (ret != 0) { VMW_DEBUG_USER("Failed to define surface.\n"); return ret; @@ -1539,9 +1492,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev, res = &user_srf->srf.res; if (req->base.buffer_handle != SVGA3D_INVALID_ID) { - ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle, - &res->backup, - &user_srf->backup_base); + ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, + &res->backup); if (ret == 0) { if (res->backup->base.base.size < res->backup_size) { VMW_DEBUG_USER("Surface backup buffer too small.\n"); @@ -1554,14 +1506,15 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } } else if (req->base.drm_surface_flags & (drm_vmw_surface_flag_create_buffer | - drm_vmw_surface_flag_coherent)) - ret = vmw_user_bo_alloc(dev_priv, tfile, - res->backup_size, - req->base.drm_surface_flags & - drm_vmw_surface_flag_shareable, - &backup_handle, - &res->backup, - &user_srf->backup_base); + drm_vmw_surface_flag_coherent)) { + ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, + res->backup_size, + &backup_handle, + &res->backup); + if (ret == 0) + vmw_bo_reference(res->backup); + + } if (unlikely(ret != 0)) { vmw_resource_unreference(&res); @@ -1593,7 +1546,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, req->base.drm_surface_flags & drm_vmw_surface_flag_shareable, VMW_RES_SURFACE, - &vmw_user_surface_base_release, NULL); + &vmw_user_surface_base_release); if (unlikely(ret != 0)) { vmw_resource_unreference(&tmp); @@ -1613,7 +1566,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, rep->buffer_size = 0; rep->buffer_handle = SVGA3D_INVALID_ID; } - vmw_resource_unreference(&res); out_unlock: @@ -1636,12 +1588,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, struct drm_file *file_priv) { struct vmw_private *dev_priv = vmw_priv(dev); - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_surface *srf; struct vmw_user_surface *user_srf; struct vmw_surface_metadata *metadata; struct ttm_base_object *base; - uint32_t backup_handle; + u32 backup_handle; int ret; ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, @@ -1658,14 +1609,12 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, metadata = &srf->metadata; mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ - ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle); + ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base, + &backup_handle); mutex_unlock(&dev_priv->cmdbuf_mutex); - - if (unlikely(ret != 0)) { - DRM_ERROR("Could not add a reference to a GB surface " - "backup buffer.\n"); - (void) ttm_ref_object_base_unref(tfile, base->handle, - TTM_REF_USAGE); + if (ret != 0) { + drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n", + req->sid); goto out_bad_resource; } @@ -1955,11 +1904,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) u32 num_mip; u32 num_subres; u32 num_samples; - size_t dirty_size, acc_size; - static struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; + size_t dirty_size; int ret; if (metadata->array_size) @@ -1973,14 +1918,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) num_subres = num_layers * num_mip; dirty_size = struct_size(dirty, boxes, num_subres); - acc_size = ttm_round_pot(dirty_size); - ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv), - acc_size, &ctx); 
- if (ret) { - VMW_DEBUG_USER("Out of graphics memory for surface " - "dirty tracker.\n"); - return ret; - } dirty = kvzalloc(dirty_size, GFP_KERNEL); if (!dirty) { @@ -1990,13 +1927,12 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) num_samples = max_t(u32, 1, metadata->multisample_count); ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format, - num_mip, num_layers, num_samples, - &dirty->cache); + num_mip, num_layers, num_samples, + &dirty->cache); if (ret) goto out_no_cache; dirty->num_subres = num_subres; - dirty->size = acc_size; res->dirty = (struct vmw_resource_dirty *) dirty; return 0; @@ -2004,7 +1940,6 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) out_no_cache: kvfree(dirty); out_no_dirty: - ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); return ret; } @@ -2015,10 +1950,8 @@ static void vmw_surface_dirty_free(struct vmw_resource *res) { struct vmw_surface_dirty *dirty = (struct vmw_surface_dirty *) res->dirty; - size_t acc_size = dirty->size; kvfree(dirty); - ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); res->dirty = NULL; } @@ -2051,8 +1984,6 @@ static int vmw_surface_clean(struct vmw_resource *res) * vmw_gb_surface_define - Define a private GB surface * * @dev_priv: Pointer to a device private. - * @user_accounting_size: Used to track user-space memory usage, set - * to 0 for kernel mode only memory * @metadata: Metadata representing the surface to create. * @user_srf_out: allocated user_srf. Set to NULL on failure. * @@ -2062,17 +1993,12 @@ static int vmw_surface_clean(struct vmw_resource *res) * it available to user mode drivers. */ int vmw_gb_surface_define(struct vmw_private *dev_priv, - uint32_t user_accounting_size, const struct vmw_surface_metadata *req, struct vmw_surface **srf_out) { struct vmw_surface_metadata *metadata; struct vmw_user_surface *user_srf; struct vmw_surface *srf; - struct ttm_operation_ctx ctx = { - .interruptible = true, - .no_wait_gpu = false - }; u32 sample_count = 1; u32 num_layers = 1; int ret; @@ -2113,22 +2039,13 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, if (req->sizes != NULL) return -EINVAL; - ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), - user_accounting_size, &ctx); - if (ret != 0) { - if (ret != -ERESTARTSYS) - DRM_ERROR("Out of graphics memory for surface.\n"); - goto out_unlock; - } - user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); if (unlikely(!user_srf)) { ret = -ENOMEM; - goto out_no_user_srf; + goto out_unlock; } *srf_out = &user_srf->srf; - user_srf->size = user_accounting_size; user_srf->prime.base.shareable = false; user_srf->prime.base.tfile = NULL; @@ -2179,9 +2096,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv, return ret; -out_no_user_srf: - ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size); - out_unlock: return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c new file mode 100644 index 000000000000..b0005b03a617 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Copyright 2021 VMware, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy,
+ * modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "vmwgfx_drv.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_resource.h>
+#include <linux/slab.h>
+
+
+static int vmw_sys_man_alloc(struct ttm_resource_manager *man,
+			     struct ttm_buffer_object *bo,
+			     const struct ttm_place *place,
+			     struct ttm_resource **res)
+{
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, *res);
+	return 0;
+}
+
+static void vmw_sys_man_free(struct ttm_resource_manager *man,
+			     struct ttm_resource *res)
+{
+	kfree(res);
+}
+
+static const struct ttm_resource_manager_func vmw_sys_manager_func = {
+	.alloc = vmw_sys_man_alloc,
+	.free = vmw_sys_man_free,
+};
+
+int vmw_sys_man_init(struct vmw_private *dev_priv)
+{
+	struct ttm_device *bdev = &dev_priv->bdev;
+	struct ttm_resource_manager *man =
+			kzalloc(sizeof(*man), GFP_KERNEL);
+
+	if (!man)
+		return -ENOMEM;
+
+	man->use_tt = true;
+	man->func = &vmw_sys_manager_func;
+
+	ttm_resource_manager_init(man, 0);
+	ttm_set_driver_manager(bdev, VMW_PL_SYSTEM, man);
+	ttm_resource_manager_set_used(man, true);
+	return 0;
+}
+
+void vmw_sys_man_fini(struct vmw_private *dev_priv)
+{
+	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev,
+							    VMW_PL_SYSTEM);
+
+	ttm_resource_manager_evict_all(&dev_priv->bdev, man);
+
+	ttm_resource_manager_set_used(man, false);
+	ttm_resource_manager_cleanup(man);
+
+	ttm_set_driver_manager(&dev_priv->bdev, VMW_PL_SYSTEM, NULL);
+	kfree(man);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index e899a936a42a..b84ecc6d6611 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -92,6 +92,13 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
 	}
 };
 
+static const struct ttm_place vmw_sys_placement_flags = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.mem_type = VMW_PL_SYSTEM,
+	.flags = 0
+};
+
 struct ttm_placement vmw_vram_gmr_placement = {
 	.num_placement = 2,
 	.placement = vram_gmr_placement_flags,
@@ -113,28 +120,11 @@ struct ttm_placement vmw_sys_placement = {
 	.busy_placement = &sys_placement_flags
 };
 
-static const struct ttm_place evictable_placement_flags[] = {
-	{
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = TTM_PL_SYSTEM,
-		.flags = 0
-	}, {
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = TTM_PL_VRAM,
-		.flags = 0
-	}, {
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = VMW_PL_GMR,
-		.flags = 0
-	}, {
-		.fpfn = 0,
-		.lpfn = 0,
-		.mem_type = VMW_PL_MOB,
-		.flags = 0
-	}
+struct ttm_placement vmw_pt_sys_placement = {
+	.num_placement = 1,
+	.placement = &vmw_sys_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &vmw_sys_placement_flags
 };
 
 static const struct ttm_place nonfixed_placement_flags[] = {
@@ -156,13 +146,6 @@ static const struct ttm_place nonfixed_placement_flags[] = {
 	}
 };
 
-struct ttm_placement vmw_evictable_placement = {
-	.num_placement = 4,
-	.placement = evictable_placement_flags,
-	.num_busy_placement = 1,
-	.busy_placement = &sys_placement_flags
-};
-
 struct ttm_placement vmw_srf_placement = {
 	.num_placement = 1,
 	.num_busy_placement = 2,
@@ -184,19 +167,6 @@ struct ttm_placement vmw_nonfixed_placement = {
 	.busy_placement = &sys_placement_flags
 };
 
-struct vmw_ttm_tt {
-	struct ttm_tt dma_ttm;
-	struct vmw_private *dev_priv;
-	int gmr_id;
-	struct vmw_mob *mob;
-	int mem_type;
-	struct sg_table sgt;
-	struct vmw_sg_table vsgt;
-	uint64_t sg_alloc_size;
-	bool mapped;
-	bool bound;
-};
-
 const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
 
 /**
@@ -317,17 +287,8 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 {
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
-	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
-	struct vmw_piter iter;
-	dma_addr_t old;
 	int ret = 0;
-	static size_t sgl_size;
-	static size_t sgt_size;
 
 	if (vmw_tt->mapped)
 		return 0;
@@ -336,20 +297,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 	vsgt->pages = vmw_tt->dma_ttm.pages;
 	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
 	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
-	vsgt->sgt = &vmw_tt->sgt;
+	vsgt->sgt = NULL;
 
 	switch (dev_priv->map_mode) {
 	case vmw_dma_map_bind:
 	case vmw_dma_map_populate:
-		if (unlikely(!sgl_size)) {
-			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
-			sgt_size = ttm_round_pot(sizeof(struct sg_table));
-		}
-		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
-		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
-		if (unlikely(ret != 0))
-			return ret;
-
+		vsgt->sgt = &vmw_tt->sgt;
 		ret = sg_alloc_table_from_pages_segment(
 			&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
 			(unsigned long)vsgt->num_pages << PAGE_SHIFT,
 			dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
 		if (ret)
 			goto out_sg_alloc_fail;
 
-		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
-			uint64_t over_alloc =
-				sgl_size * (vsgt->num_pages -
-					    vmw_tt->sgt.orig_nents);
-
-			ttm_mem_global_free(glob, over_alloc);
-			vmw_tt->sg_alloc_size -= over_alloc;
-		}
-
 		ret = vmw_ttm_map_for_dma(vmw_tt);
 		if (unlikely(ret != 0))
 			goto out_map_fail;
@@ -375,16 +319,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		break;
 	}
 
-	old = ~((dma_addr_t) 0);
-	vmw_tt->vsgt.num_regions = 0;
-	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
-		dma_addr_t cur = vmw_piter_dma_addr(&iter);
-
-		if (cur != old + PAGE_SIZE)
-			vmw_tt->vsgt.num_regions++;
-		old = cur;
-	}
-
 	vmw_tt->mapped = true;
 	return 0;
 
@@ -392,7 +326,6 @@ out_map_fail:
 	sg_free_table(vmw_tt->vsgt.sgt);
 	vmw_tt->vsgt.sgt = NULL;
 out_sg_alloc_fail:
-	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
 	return ret;
 }
 
@@ -418,8 +351,6 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 		vmw_ttm_unmap_from_dma(vmw_tt);
 		sg_free_table(vmw_tt->vsgt.sgt);
 		vmw_tt->vsgt.sgt = NULL;
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_tt->sg_alloc_size);
 		break;
 	default:
 		break;
 	}
@@ -484,6 +415,9 @@ static int vmw_ttm_bind(struct ttm_device *bdev,
 				    &vmw_be->vsgt, ttm->num_pages,
 				    vmw_be->gmr_id);
 		break;
+	case VMW_PL_SYSTEM:
+		/* Nothing to be done for a system bind */
+		break;
 	default:
 		BUG();
 	}
@@ -507,6 +441,8 @@ static void vmw_ttm_unbind(struct ttm_device *bdev,
 	case VMW_PL_MOB:
 		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
 		break;
+	case VMW_PL_SYSTEM:
+		break;
 	default:
 		BUG();
 	}
@@ -534,7 +470,6 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_device *bdev,
 			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	unsigned int i;
 	int ret;
 
 	/* TODO: maybe completely drop this ? */
@@ -542,22 +477,7 @@ static int vmw_ttm_populate(struct ttm_device *bdev,
 		return 0;
 
 	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < ttm->num_pages; ++i) {
-		ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
-						PAGE_SIZE, ctx);
-		if (ret)
-			goto error;
-	}
-	return 0;
-
-error:
-	while (i--)
-		ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
-					 PAGE_SIZE);
-	ttm_pool_free(&bdev->pool, ttm);
 	return ret;
 }
 
@@ -566,7 +486,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
 	struct vmw_ttm_tt *vmw_tt =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
-	unsigned int i;
 
 	vmw_ttm_unbind(bdev, ttm);
 
@@ -577,10 +496,6 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 
 	vmw_ttm_unmap_dma(vmw_tt);
 
-	for (i = 0; i < ttm->num_pages; ++i)
-		ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
-					 PAGE_SIZE);
-
 	ttm_pool_free(&bdev->pool, ttm);
 }
 
@@ -624,6 +539,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
+	case VMW_PL_SYSTEM:
 	case VMW_PL_GMR:
 	case VMW_PL_MOB:
 		return 0;
@@ -670,6 +586,11 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
 	(void) ttm_bo_wait(bo, false, false);
 }
 
+static bool vmw_memtype_is_system(uint32_t mem_type)
+{
+	return mem_type == TTM_PL_SYSTEM || mem_type == VMW_PL_SYSTEM;
+}
+
 static int vmw_move(struct ttm_buffer_object *bo,
 		    bool evict,
 		    struct ttm_operation_ctx *ctx,
@@ -680,7 +601,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
 	int ret;
 
-	if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+	if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
 		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
 		if (ret)
 			return ret;
@@ -689,7 +610,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
 	vmw_move_notify(bo, bo->resource, new_mem);
 
 	if (old_man->use_tt && new_man->use_tt) {
-		if (bo->resource->mem_type == TTM_PL_SYSTEM) {
+		if (vmw_memtype_is_system(bo->resource->mem_type)) {
 			ttm_bo_move_null(bo, new_mem);
 			return 0;
 		}
@@ -736,7 +657,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
 	int ret;
 
 	ret = vmw_bo_create_kernel(dev_priv, bo_size,
-				   &vmw_sys_placement,
+				   &vmw_pt_sys_placement,
 				   &bo);
 	if (unlikely(ret != 0))
 		return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 0a4c340252ec..265f7c48d856 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -27,30 +27,44 @@
 
 #include "vmwgfx_drv.h"
 
-static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev,
-						  unsigned long offset,
-						  unsigned long pages)
+static int vmw_bo_vm_lookup(struct ttm_device *bdev,
+			    struct drm_file *filp,
+			    unsigned long offset,
+			    unsigned long pages,
+			    struct ttm_buffer_object **p_bo)
 {
 	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
 	struct drm_device *drm = &dev_priv->drm;
 	struct drm_vma_offset_node *node;
-	struct ttm_buffer_object *bo = NULL;
+	int ret;
+
+	*p_bo = NULL;
 
 	drm_vma_offset_lock_lookup(bdev->vma_manager);
 	node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages);
 	if (likely(node)) {
-		bo = container_of(node, struct ttm_buffer_object,
+		*p_bo = container_of(node, struct ttm_buffer_object,
 				  base.vma_node);
-		bo = ttm_bo_get_unless_zero(bo);
+		*p_bo = ttm_bo_get_unless_zero(*p_bo);
 	}
 	drm_vma_offset_unlock_lookup(bdev->vma_manager);
 
-	if (!bo)
+	if (!*p_bo) {
 		drm_err(drm, "Could not find buffer object to map\n");
+		return -EINVAL;
+	}
+
+	if (!drm_vma_node_is_allowed(node, filp)) {
+		ret = -EACCES;
+		goto out_no_access;
+	}
 
-	return bo;
+	return 0;
+out_no_access:
+	ttm_bo_put(*p_bo);
+	return ret;
 }
 
 int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -64,7 +78,6 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	};
 	struct drm_file *file_priv = filp->private_data;
 	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
-	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	struct ttm_device *bdev = &dev_priv->bdev;
 	struct ttm_buffer_object *bo;
 	int ret;
@@ -72,13 +85,9 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
 		return -EINVAL;
 
-	bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
-	if (unlikely(!bo))
-		return -EINVAL;
-
-	ret = vmw_user_bo_verify_access(bo, tfile);
+	ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo);
 	if (unlikely(ret != 0))
-		goto out_unref;
+		return ret;
 
 	ret = ttm_bo_mmap_obj(vma, bo);
 	if (unlikely(ret != 0))
@@ -99,38 +108,3 @@ out_unref:
 	return ret;
 }
 
-/* struct vmw_validation_mem callback */
-static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
-{
-	static struct ttm_operation_ctx ctx = {.interruptible = false,
-					       .no_wait_gpu = false};
-	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
-	return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
-}
-
-/* struct vmw_validation_mem callback */
-static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
-{
-	struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
-	return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_validation_mem_init_ttm - Interface the validation memory tracker
- * to ttm.
- * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
- * rather than a struct vmw_validation_mem is to make sure assumption in the
- * callbacks that struct vmw_private derives from struct vmw_validation_mem
- * holds true.
- * @gran: The recommended allocation granularity
- */
-void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
-{
-	struct vmw_validation_mem *vvm = &dev_priv->vvm;
-
-	vvm->reserve_mem = vmw_vmt_reserve;
-	vvm->unreserve_mem = vmw_vmt_unreserve;
-	vvm->gran = gran;
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index ebc1d83c34b4..6ad744ae07f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -117,7 +117,7 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 
 	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-					 arg->stream_id, TTM_REF_USAGE);
+					 arg->stream_id);
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index b09094b50c5d..f46891012be3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -29,6 +29,9 @@
 #include "vmwgfx_validation.h"
 #include "vmwgfx_drv.h"
 
+
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
 /**
  * struct vmw_validation_bo_node - Buffer object validation metadata.
  * @base: Metadata used for TTM reservation- and validation.
  * @hash: A hash entry used for the duplicate detection (different code may
  * populate this). Immutable.
  * @coherent_count: If switching backup buffers, number of new coherent
  * resources that will have this buffer as a backup buffer.
  * @as_mob: Validate as mob.
  * @cpu_blit: Validate for cpu blit access.
 */
 struct vmw_validation_bo_node {
 	struct ttm_validate_buffer base;
-	struct drm_hash_item hash;
+	struct vmwgfx_hash_item hash;
 	unsigned int coherent_count;
 	u32 as_mob : 1;
 	u32 cpu_blit : 1;
@@ -72,7 +75,7 @@ struct vmw_validation_bo_node {
 */
 struct vmw_validation_res_node {
 	struct list_head head;
-	struct drm_hash_item hash;
+	struct vmwgfx_hash_item hash;
 	struct vmw_resource *res;
 	struct vmw_buffer_object *new_backup;
 	unsigned long new_backup_offset;
@@ -113,13 +116,8 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
 	struct page *page;
 
 	if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
-		int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
-
-		if (ret)
-			return NULL;
-
-		ctx->vm_size_left += ctx->vm->gran;
-		ctx->total_mem += ctx->vm->gran;
+		ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
+		ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
 	}
 
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -159,7 +157,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
 	ctx->mem_size_left = 0;
 
 	if (ctx->vm && ctx->total_mem) {
-		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
 		ctx->total_mem = 0;
 		ctx->vm_size_left = 0;
 	}
@@ -184,9 +181,9 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
 		return NULL;
 
 	if (ctx->ht) {
-		struct drm_hash_item *hash;
+		struct vmwgfx_hash_item *hash;
 
-		if (!drm_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
+		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) vbo, &hash))
 			bo_node = container_of(hash, typeof(*bo_node), hash);
 	} else {
 		struct vmw_validation_bo_node *entry;
@@ -221,9 +218,9 @@ vmw_validation_find_res_dup(struct vmw_validation_context *ctx,
 		return NULL;
 
 	if (ctx->ht) {
-		struct drm_hash_item *hash;
+		struct vmwgfx_hash_item *hash;
 
-		if (!drm_ht_find_item(ctx->ht, (unsigned long) res, &hash))
+		if (!vmwgfx_ht_find_item(ctx->ht, (unsigned long) res, &hash))
 			res_node = container_of(hash, typeof(*res_node), hash);
 	} else {
 		struct vmw_validation_res_node *entry;
@@ -280,7 +277,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
 
 		if (ctx->ht) {
 			bo_node->hash.key = (unsigned long) vbo;
-			ret = drm_ht_insert_item(ctx->ht, &bo_node->hash);
+			ret = vmwgfx_ht_insert_item(ctx->ht, &bo_node->hash);
 			if (ret) {
 				DRM_ERROR("Failed to initialize a buffer "
 					  "validation entry.\n");
@@ -335,7 +332,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
 
 	if (ctx->ht) {
 		node->hash.key = (unsigned long) res;
-		ret = drm_ht_insert_item(ctx->ht, &node->hash);
+		ret = vmwgfx_ht_insert_item(ctx->ht, &node->hash);
 		if (ret) {
 			DRM_ERROR("Failed to initialize a resource validation "
 				  "entry.\n");
@@ -688,13 +685,13 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
 		return;
 
 	list_for_each_entry(entry, &ctx->bo_list, base.head)
-		(void) drm_ht_remove_item(ctx->ht, &entry->hash);
+		(void) vmwgfx_ht_remove_item(ctx->ht, &entry->hash);
 
 	list_for_each_entry(val, &ctx->resource_list, head)
-		(void) drm_ht_remove_item(ctx->ht, &val->hash);
+		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
 
 	list_for_each_entry(val, &ctx->resource_ctx_list, head)
-		(void) drm_ht_remove_item(ctx->ht, &val->hash);
+		(void) vmwgfx_ht_remove_item(ctx->ht, &val->hash);
 
 	ctx->ht = NULL;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 739906d1b3eb..f21df053882b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -31,29 +31,15 @@
 #include <linux/list.h>
 #include <linux/ww_mutex.h>
 
-#include <drm/drm_hashtab.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
+#include "vmwgfx_hashtab.h"
+
 #define VMW_RES_DIRTY_NONE 0
 #define VMW_RES_DIRTY_SET BIT(0)
 #define VMW_RES_DIRTY_CLEAR BIT(1)
 
 /**
- * struct vmw_validation_mem - Custom interface to provide memory reservations
- * for the validation code.
- * @reserve_mem: Callback to reserve memory
- * @unreserve_mem: Callback to unreserve memory
- * @gran: Reservation granularity. Contains a hint how much memory should
- * be reserved in each call to @reserve_mem(). A slow implementation may want
- * reservation to be done in large batches.
- */
-struct vmw_validation_mem {
-	int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
-	void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
-	size_t gran;
-};
-
-/**
 * struct vmw_validation_context - Per command submission validation context
 * @ht: Hash table used to find resource- or buffer object duplicates
 * @resource_list: List head for resource validation metadata
@@ -73,7 +59,7 @@ struct vmw_validation_mem {
 * @total_mem: Amount of reserved memory.
 */
 struct vmw_validation_context {
-	struct drm_open_hash *ht;
+	struct vmwgfx_open_hash *ht;
 	struct list_head resource_list;
 	struct list_head resource_ctx_list;
 	struct list_head bo_list;
@@ -129,21 +115,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
 }
 
 /**
- * vmw_validation_set_val_mem - Register a validation mem object for
- * validation memory reservation
- * @ctx: The validation context
- * @vm: Pointer to a struct vmw_validation_mem
- *
- * Must be set before the first attempt to allocate validation memory.
- */
-static inline void
-vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
-			   struct vmw_validation_mem *vm)
-{
-	ctx->vm = vm;
-}
-
-/**
 * vmw_validation_set_ht - Register a hash table for duplicate finding
 * @ctx: The validation context
 * @ht: Pointer to a hash table to use for duplicate finding
@@ -151,7 +122,7 @@ vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
 * available at validation context declaration time
 */
 static inline void vmw_validation_set_ht(struct vmw_validation_context *ctx,
-					 struct drm_open_hash *ht)
+					 struct vmwgfx_open_hash *ht)
 {
 	ctx->ht = ht;
 }
@@ -190,22 +161,6 @@ vmw_validation_bo_fence(struct vmw_validation_context *ctx,
 }
 
 /**
- * vmw_validation_context_init - Initialize a validation context
- * @ctx: Pointer to the validation context to initialize
- *
- * This function initializes a validation context with @merge_dups set
- * to false
- */
-static inline void
-vmw_validation_context_init(struct vmw_validation_context *ctx)
-{
-	memset(ctx, 0, sizeof(*ctx));
-	INIT_LIST_HEAD(&ctx->resource_list);
-	INIT_LIST_HEAD(&ctx->resource_ctx_list);
-	INIT_LIST_HEAD(&ctx->bo_list);
-}
-
-/**
 * vmw_validation_align - Align a validation memory allocation
 * @val: The size to be aligned
 *
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 9f14d99c763c..e63088c2121d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -469,19 +469,7 @@ static void xen_drm_drv_release(struct drm_device *dev)
 	kfree(drm_info);
 }
 
-static const struct file_operations xen_drm_dev_fops = {
-	.owner = THIS_MODULE,
-	.open = drm_open,
-	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = drm_compat_ioctl,
-#endif
-	.poll = drm_poll,
-	.read = drm_read,
-	.llseek = no_llseek,
-	.mmap = xen_drm_front_gem_mmap,
-};
+DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);
 
 static const struct drm_driver xen_drm_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
@@ -489,7 +477,7 @@ static const struct drm_driver xen_drm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
-	.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
+	.gem_prime_mmap = drm_gem_prime_mmap,
 	.dumb_create = xen_drm_drv_dumb_create,
 	.fops = &xen_drm_dev_fops,
 	.name = "xendrm-du",
@@ -773,6 +761,7 @@ static struct xenbus_driver xen_driver = {
 	.probe = xen_drv_probe,
 	.remove = xen_drv_remove,
 	.otherend_changed = displback_changed,
+	.not_essential = true,
 };
 
 static int __init xen_drv_init(void)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index b293c67230ef..dd358ba2bf8e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -57,6 +57,47 @@ static void gem_free_pages_array(struct xen_gem_object *xen_obj)
 	xen_obj->pages = NULL;
 }
 
+static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
+					 struct vm_area_struct *vma)
+{
+	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+	int ret;
+
+	vma->vm_ops = gem_obj->funcs->vm_ops;
+
+	/*
+	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+	 * the whole buffer.
+	 */
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_pgoff = 0;
+
+	/*
+	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
+	 * all memory which is shared with other entities in the system
+	 * (including the hypervisor and other guests) must reside in memory
+	 * which is mapped as Normal Inner Write-Back Outer Write-Back
+	 * Inner-Shareable.
+	 */
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+	/*
+	 * vm_operations_struct.fault handler will be called if CPU access
+	 * to VM is here. For GPUs this isn't the case, because CPU doesn't
+	 * touch the memory. Insert pages now, so both CPU and GPU are happy.
+	 *
+	 * FIXME: as we insert all the pages now then no .fault handler must
+	 * be called, so don't provide one
+	 */
+	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+	if (ret < 0)
+		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
+
+	return ret;
+}
+
 static const struct vm_operations_struct xen_drm_drv_vm_ops = {
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
@@ -67,6 +108,7 @@ static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
 	.get_sg_table = xen_drm_front_gem_get_sg_table,
 	.vmap = xen_drm_front_gem_prime_vmap,
 	.vunmap = xen_drm_front_gem_prime_vunmap,
+	.mmap = xen_drm_front_gem_object_mmap,
 	.vm_ops = &xen_drm_drv_vm_ops,
 };
 
@@ -238,58 +280,6 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 	return &xen_obj->base;
 }
 
-static int gem_mmap_obj(struct xen_gem_object *xen_obj,
-			struct vm_area_struct *vma)
-{
-	int ret;
-
-	/*
-	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
-	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
-	 * the whole buffer.
-	 */
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
-	vma->vm_pgoff = 0;
-	/*
-	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
-	 * all memory which is shared with other entities in the system
-	 * (including the hypervisor and other guests) must reside in memory
-	 * which is mapped as Normal Inner Write-Back Outer Write-Back
-	 * Inner-Shareable.
-	 */
-	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
-	/*
-	 * vm_operations_struct.fault handler will be called if CPU access
-	 * to VM is here. For GPUs this isn't the case, because CPU
-	 * doesn't touch the memory. Insert pages now, so both CPU and GPU are
-	 * happy.
-	 * FIXME: as we insert all the pages now then no .fault handler must
-	 * be called, so don't provide one
-	 */
-	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
-	if (ret < 0)
-		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
-
-	return ret;
-}
-
-int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct xen_gem_object *xen_obj;
-	struct drm_gem_object *gem_obj;
-	int ret;
-
-	ret = drm_gem_mmap(filp, vma);
-	if (ret < 0)
-		return ret;
-
-	gem_obj = vma->vm_private_data;
-	xen_obj = to_xen_gem_obj(gem_obj);
-	return gem_mmap_obj(xen_obj, vma);
-}
-
 int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
 				 struct dma_buf_map *map)
 {
 	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
@@ -313,17 +303,3 @@ void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
 {
 	vunmap(map->vaddr);
 }
-
-int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
-				 struct vm_area_struct *vma)
-{
-	struct xen_gem_object *xen_obj;
-	int ret;
-
-	ret = drm_gem_mmap_obj(gem_obj, gem_obj->size, vma);
-	if (ret < 0)
-		return ret;
-
-	xen_obj = to_xen_gem_obj(gem_obj);
-	return gem_mmap_obj(xen_obj, vma);
-}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
index a4e67d0a149c..eaea470f7001 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -15,9 +15,7 @@ struct dma_buf_attachment;
 struct dma_buf_map;
 struct drm_device;
 struct drm_gem_object;
-struct file;
 struct sg_table;
-struct vm_area_struct;
 
 struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
 						size_t size);
@@ -33,15 +31,10 @@ struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
 
 void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
 
-int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-
 int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
 				 struct dma_buf_map *map);
 
 void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
 				    struct dma_buf_map *map);
 
-int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
-				 struct vm_area_struct *vma);
-
 #endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index c3d08269faa9..d8d38d86d5c6 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -7,7 +7,6 @@ config DRM_ZYNQMP_DPSUB
 	depends on XILINX_ZYNQMP_DPDMA
 	select DMA_ENGINE
 	select DRM_GEM_CMA_HELPER
-	select DRM_KMS_CMA_HELPER
 	select DRM_KMS_HELPER
 	select GENERIC_PHY
 	help
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
index 6941add95d0f..ecab72882192 100644
--- a/drivers/gpu/host1x/fence.c
+++ b/drivers/gpu/host1x/fence.c
@@ -15,7 +15,7 @@
 #include "intr.h"
 #include "syncpt.h"
 
-DEFINE_SPINLOCK(lock);
+static DEFINE_SPINLOCK(lock);
 
 struct host1x_syncpt_fence {
 	struct dma_fence base;
@@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
 		return ERR_PTR(-ENOMEM);
 
 	fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
-	if (!fence->waiter)
+	if (!fence->waiter) {
+		kfree(fence);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	fence->sp = sp;
 	fence->threshold = threshold;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 8ae301eef643..a9639d098893 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -259,10 +259,24 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
 		cfg->data_width = IPU_CSI_DATA_WIDTH_8;
 		break;
 	case MEDIA_BUS_FMT_UYVY8_1X16:
+		if (mbus_type == V4L2_MBUS_BT656) {
+			cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+			cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+		} else {
+			cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+			cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+		}
+		cfg->mipi_dt = MIPI_DT_YUV422;
+		break;
 	case MEDIA_BUS_FMT_YUYV8_1X16:
-		cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+		if (mbus_type == V4L2_MBUS_BT656) {
+			cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+			cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+		} else {
+			cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+			cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+		}
 		cfg->mipi_dt = MIPI_DT_YUV422;
-		cfg->data_width = IPU_CSI_DATA_WIDTH_16;
 		break;
 	case MEDIA_BUS_FMT_SBGGR8_1X8:
 	case MEDIA_BUS_FMT_SGBRG8_1X8:
@@ -332,7 +346,7 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
 			    const struct v4l2_mbus_config *mbus_cfg,
 			    const struct v4l2_mbus_framefmt *mbus_fmt)
 {
-	int ret;
+	int ret, is_bt1120;
 
 	memset(csicfg, 0, sizeof(*csicfg));
 
@@ -353,11 +367,18 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
 		break;
 	case V4L2_MBUS_BT656:
 		csicfg->ext_vsync = 0;
+		/* UYVY10_1X20 etc. should be supported as well */
+		is_bt1120 = mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+			    mbus_fmt->code == MEDIA_BUS_FMT_YUYV8_1X16;
 		if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
 		    mbus_fmt->field == V4L2_FIELD_ALTERNATE)
-			csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
+			csicfg->clk_mode = is_bt1120 ?
+				IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR :
+				IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
 		else
-			csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+			csicfg->clk_mode = is_bt1120 ?
+				IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR :
+				IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
 		break;
 	case V4L2_MBUS_CSI2_DPHY:
 		/*